', 'exec')\n except (SyntaxError, TypeError, UnicodeDecodeError):\n return False\n\n\ndef fix_file(filename, opts, output=sys.stdout):\n tmp_source = read_from_filename(filename)\n\n # Add missing newline (important for diff)\n tmp_newline = find_newline(tmp_source)\n if tmp_source == tmp_source.rstrip(tmp_newline):\n tmp_source += tmp_newline\n\n fix = FixPEP8(filename, opts, contents=tmp_source)\n fixed_source = fix.fix()\n original_source = copy.copy(fix.original_source)\n tmp_filename = filename\n if not pep8 or opts.in_place:\n encoding = detect_encoding(filename)\n for _ in range(opts.pep8_passes):\n if fixed_source == tmp_source:\n break\n tmp_source = copy.copy(fixed_source)\n if not pep8:\n tmp_filename = tempfile.mkstemp()[1]\n fp = open_with_encoding(tmp_filename, encoding=encoding, mode='w')\n fp.write(fixed_source)\n fp.close()\n fix = FixPEP8(tmp_filename, opts, contents=tmp_source)\n fixed_source = fix.fix()\n if not pep8:\n os.remove(tmp_filename)\n del tmp_filename\n del tmp_source\n\n if opts.diff:\n new = StringIO(''.join(fix.source))\n new = new.readlines()\n output.write(_get_difftext(original_source, new, filename))\n elif opts.in_place:\n fp = open_with_encoding(filename, encoding=encoding,\n mode='w')\n fp.write(fixed_source)\n fp.close()\n else:\n output.write(fixed_source)\n\n\ndef parse_args(args):\n \"\"\"Parse command-line options.\"\"\"\n parser = OptionParser(usage='Usage: autopep8 [options] '\n '[filename [filename ...]]',\n version=\"autopep8: %s\" % __version__,\n description=__doc__,\n prog='autopep8')\n parser.add_option('-v', '--verbose', action='store_true', dest='verbose',\n help='print verbose messages')\n parser.add_option('-d', '--diff', action='store_true', dest='diff',\n help='print the diff for the fixed source')\n parser.add_option('-i', '--in-place', action='store_true',\n help='make changes to files in place')\n parser.add_option('-r', '--recursive', action='store_true',\n help='run recursively; must be used with --in-place or '\n '--diff')\n parser.add_option('-p', '--pep8-passes',\n default=PEP8_PASSES_MAX, type='int',\n help='maximum number of additional pep8 passes'\n ' (default: %default)')\n parser.add_option('--ignore', default='',\n help='do not fix these errors/warnings (e.g. E4,W)')\n parser.add_option('--select', default='',\n help='select errors/warnings (e.g. 
E4,W)')\n opts, args = parser.parse_args(args)\n\n if not len(args):\n parser.error('incorrect number of arguments')\n\n if len(args) > 1 and not (opts.in_place or opts.diff):\n parser.error('autopep8 only takes one filename as argument '\n 'unless the \"--in-place\" or \"--diff\" options are '\n 'used')\n\n if opts.recursive and not (opts.in_place or opts.diff):\n parser.error('--recursive must be used with --in-place or --diff')\n\n if opts.in_place and opts.diff:\n parser.error('--in-place and --diff are mutually exclusive')\n\n return opts, args\n\n\ndef main():\n \"\"\"Tool main.\"\"\"\n opts, args = parse_args(sys.argv[1:])\n if opts.in_place or opts.diff:\n filenames = list(set(args))\n else:\n assert len(args) == 1\n assert not opts.recursive\n filenames = args[:1]\n\n while filenames:\n name = filenames.pop(0)\n if opts.recursive and os.path.isdir(name):\n for root, directories, children in os.walk(name):\n filenames += [os.path.join(root, f) for f in children\n if f.endswith('.py') and\n not f.startswith('.')]\n # prune hidden directories with a slice assignment; calling\n # directories.remove() while iterating over the same list skips entries\n directories[:] = [d for d in directories\n if not d.startswith('.')]\n else:\n if opts.verbose:\n sys.stderr.write('[file:%s]\\n' % name)\n try:\n fix_file(name, opts)\n except (UnicodeDecodeError, UnicodeEncodeError, IOError) as error:\n sys.stderr.write(str(error) + '\\n')\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"funningboy/vim","sub_path":"pylibs/autopep8.py","file_name":"autopep8.py","file_ext":"py","file_size_in_byte":55134,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"75"}
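The autopep8 record above wires its CLI through optparse; a minimal sketch of driving that parsing layer programmatically, assuming the record's module is importable as autopep8 (the 'src/' path is made up):

# Hypothetical driver for the parse_args function defined in the record above.
from autopep8 import parse_args

opts, args = parse_args(['--in-place', '--recursive', '--pep8-passes', '5', 'src/'])
assert opts.in_place and opts.recursive  # --recursive requires --in-place or --diff
assert opts.pep8_passes == 5
print(args)  # ['src/']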
+{"seq_id":"21366558893","text":"from fastapi.params import Header\n\nfrom pim_vi.controller import Controller\nfrom pim_vi.model import Product\nfrom pim_vi.model.product_model import ProductModel\n\n\nclass ProductController(Controller):\n def __init__(self, app):\n super().__init__()\n app.post(\"/product\")(self.create_product)\n app.get(\"/product/{product_id}\")(self.get_product)\n app.get(\"/product\")(self.get_products)\n app.delete(\"/product/{product_id}\")(self.delete_product)\n app.put(\"/product/{product_id}\")(self.update_product)\n\n async def create_product(self, product: Product, authorization: str = Header(None)):\n try:\n user_id = super().get_id_from_token(authorization)\n async with ProductModel() as m:\n product = await m.create_product(product)\n if not product:\n return {\"message\": \"Product not created\"}\n return product\n except Exception as e:\n print(e)\n return {\"message\": \"Erro ao criar produto\", \"error\": e}\n\n async def get_products(self):\n try:\n async with ProductModel() as m:\n products = await m.get_all_products()\n if not products:\n return {\"message\": \"Products not found\"}\n return products\n except Exception as e:\n print(e)\n return {\"message\": \"Erro ao buscar produtos\", \"error\": e}\n\n async def get_product(self, product_id: str, authorization: str = Header(None)):\n try:\n user_id = super().get_id_from_token(authorization)\n async with ProductModel() as m:\n product = await m.get_product(product_id)\n if not product:\n return {\"message\": \"Product not found\"}\n return product\n except Exception as e:\n print(e)\n return {\"message\": \"Erro ao buscar produto\", \"error\": e}\n\n async def delete_product(self, product_id: str, authorization: str = Header(None)):\n try:\n user_id = super().get_id_from_token(authorization)\n async with ProductModel() as m:\n product = await m.delete_product(product_id)\n if not product:\n return {\"message\": \"Product not found\"}\n return product\n except Exception as e:\n print(e)\n return {\"message\": \"Erro ao buscar produto\", \"error\": e}\n\n async def update_product(self, product_id: str, product: Product, authorization: str = Header(None)):\n try:\n user_id = super().get_id_from_token(authorization)\n async with ProductModel() as m:\n product = await m.update_product_by_id(product_id, product)\n if not product:\n return {\"message\": \"Product not found\"}\n return product\n except Exception as e:\n print(e)\n return {\"message\": \"Erro ao buscar produto\", \"error\": e}\n","repo_name":"EdSL88/PimVIBack","sub_path":"pim_vi/controller/product_controller.py","file_name":"product_controller.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"72119589681","text":"from GestiondeClientes import GestiondeClientes\r\nfrom GestiondeProductos import GestiondeProductos\r\nfrom GestiondePagos import GestiondePagos\r\nfrom GestiondeEnvios import GestiodeEnvios\r\n\r\n\r\nclass GestiondeVentas:\r\n #Primero creamos el constructor para establecer los atributos necesarios\r\n\r\n def __init__ (self,cliente,productos,cantidad,pago,envio,subtotal,descuentos,iva,igtf,total,fecha):\r\n self.cliente=cliente\r\n self.productos=productos\r\n self.cantidad=cantidad\r\n self.pago=pago\r\n self.envio=envio\r\n self.subtotal=subtotal\r\n self.descuentos=descuentos\r\n self.iva=iva\r\n self.igtf=igtf\r\n self.total=total\r\n self.fecha=fecha\r\n self.CompraProductos=[]\r\n self.ventas=[]\r\n self.gestiondeClientes=GestiondeClientes\r\n\r\n \r\n #Una serie de metodos para simplificar los calculos y no crear un solo metodo largo\r\n def calcular_descuento(self):\r\n if GestiondeClientes.tipo == \"Juridico\":\r\n self.descuentos=self.subtotal * 0.05\r\n else:\r\n self.descuentos= 0\r\n \r\n def calcular_iva(self):\r\n self.calcular_subtotal\r\n self.iva= self.subtotal * 0.16\r\n \r\n def calcular_igtf(self):\r\n if GestiondePagos.tipodepago == \"Efectivo\" or self.pago==\"Tarjeta internacional\" or self.pago==\"Zelle\":\r\n self.igtf= self.subtotal * 0.03\r\n else:\r\n self.igtf= 0\r\n\r\n\r\n def calcular_subtotal(self): \r\n self.subtotal= sum(i[1] * i[2] for i in self.CompraProductos)\r\n\r\n\r\n def calcular_Total(self):\r\n self.calcular_descuento() \r\n self.calcular_iva()\r\n self.calcular_igtf() \r\n self.total = self.subtotal - self.descuentos + self.iva + self.igtf\r\n\r\n #Metodo de validacion para simplificar donde sea necesario\r\n\r\n def validacionCI(self,CI):\r\n while not CI.isdigit():\r\n print(\"Introduzca una cedula valida\")\r\n CI=input(\"CI: \")\r\n \r\n #En este metodo primero llamamos a otras clases para poder acceder a atributos de ellas\r\n #Elaboro unos condicionales para poder buscar y utilizar los objetos deseados y mensajes de error\r\n #por si no se encuentra registrado\r\n\r\n\r\n def RegistrarVenta(self, clienteB, prodB):\r\n clienteB=GestiondeClientes()\r\n prodB=GestiondeProductos()\r\n print(\"Desea buscar el cliente mediante cedula o email?\")\r\n decisionCI=input(\"CI (1) correo(2)\")\r\n while decisionCI not in (\"1\" ,\"2\"):\r\n print(\"Introduzca una opcion valida\")\r\n print(\"Desea buscar el cliente mediante cedula o email?\")\r\n decisionCI=input(\"CI (1) correo(2)\")\r\n if decisionCI==\"1\":\r\n cedulacliente=input(\"CI del cliente: \\n\")\r\n self.validacionCI(cedulacliente)\r\n cliente=clienteB.buscar_CI(cedulacliente)\r\n\r\n elif decisionCI==\"2\":\r\n correocliente=input(\"Correo del cliente:\\n\")\r\n cliente=clienteB.buscar_correo(correocliente)\r\n if cliente is None:\r\n print(\"El cliente no se encuentra registrado.\")\r\n return\r\n\r\n #Se dan a conocer los atributos del cliente que se establecieron para evitar confusiones\r\n\r\n print(f\"La compra es a nombre de {cliente.nombre} {cliente.apellido}\")\r\n \r\n #Ahora ocurre lo mismo pero con los productos con ligeros cambios, ya que se debe decidir la cantidad\r\n #a comprar de cada uno \r\n\r\n print(\"Desea agregar un producto?\")\r\n AgregarProductos=input(\"Si (1) No(2)\")\r\n while AgregarProductos ==\"1\":\r\n print(\"Que producto va a comprar\")\r\n producto=input(\"Nombre del producto:\")\r\n prod=prodB.buscar_name(producto)\r\n if prod is None:\r\n print(\"No disponemos del producto\")\r\n return\r\n else:\r\n 
cantidad=int(input(\"Cantidad a comprar:\"))\r\n precio=prod.precio\r\n self.CompraProductos.append([prod, cantidad, precio])\r\n AgregarProductos=input(\"Si (1) No(2)\")\r\n\r\n #The GestiondePagos module is used so the purchase stays consistent, that is, the parts are not\r\n #independent but related, working together, sharing the same values\r\n #and simplifying the code\r\n\r\n\r\n self.pago=GestiondePagos(self.cliente)\r\n \r\n self.pago.RegistrarPago()\r\n self.pago.RegistrarFechaActual()\r\n self.fecha = self.pago.fecha\r\n\r\n #The same with the GestiondeEnvios module\r\n self.envio=GestiodeEnvios()\r\n self.envio.RegistrarEnvio() \r\n\r\n\r\n #Run the calculations using the GestiondePagos values together with the product ones\r\n \r\n self.calcular_Total()\r\n self.total+=self.envio.costoenvio\r\n\r\n #Determine the cost and show the total with and without discount (the same if there is none)\r\n\r\n print(f\"Subtotal antes de descuentos: {self.subtotal}\")\r\n print(f\"El total de la compra sería: {self.total}\")\r\n\r\n #Finally the user is asked whether they really want to buy, since price confusion can happen;\r\n #on confirmation the sale is appended to two lists, payments and sales, printing the general data and the invoice.\r\n #If not confirmed the process simply stops there, adding no objects and showing a cancellation message\r\n\r\n print(\"Confirma la venta?\")\r\n confirmar = input(\"Si (1) No(2): \")\r\n while confirmar not in (\"1\", \"2\"):\r\n confirmar = input(\"Introduzca '1' para confirmar o '2' para cancelar: \")\r\n \r\n if confirmar == \"1\": \r\n print(\"Compra realizada con exito\")\r\n self.listPagos.append({\r\n \"cliente\": self.cliente,\r\n \"productos\": self.CompraProductos, \r\n \"total\": self.total\r\n })\r\n print(\"Su factura\")\r\n self.Factura()\r\n\r\n self.ventas.append(self) #self.listVentas was never defined; self.ventas is the list from __init__\r\n\r\n else: \r\n print(\"Venta cancelada.\")\r\n \r\n \r\n\r\n #Method to display the attributes\r\n\r\n def Factura(self):\r\n print(f'''\r\n cliente: {self.cliente.nombre}\r\n productos: {self.CompraProductos}\r\n cantidad: {self.cantidad}\r\n total: {self.total}\r\n direccion: {self.cliente.direccion}\r\n fecha: {self.fecha} \r\n ''')\r\n\r\n #Search methods like the ones in GestiondeProductos (see those docstrings for an explanation)\r\n\r\n def SearchClientesVentas(self):\r\n buscar= False\r\n nombre=input(\"Nombre del cliente: \\n\")\r\n for venta in self.ventas:\r\n if venta.cliente.nombre==nombre:\r\n buscar=True\r\n print(\"Se encuentra en la lista\")\r\n venta.Factura()\r\n if not buscar:\r\n print(\"El cliente no está registrado\")\r\n\r\n def SearchFechaVentas(self):\r\n buscar = False\r\n fecha_buscar = input(\"Introduzca la fecha a buscar (mm/dd/aaaa, hh:mm:ss): \") \r\n for venta in self.ventas:\r\n if venta.fecha == fecha_buscar:\r\n buscar = True\r\n print(\"Se ha encontrado una coincidencia\")\r\n venta.Factura() \r\n if not buscar:\r\n print(\"No se han encontrado coincidencias\")\r\n\r\n def SearchMontoVentas(self):\r\n buscar=False\r\n MontoABuscar=float(input(\"Introduzca el monto a buscar: \")) #compare as a number, not as text\r\n for venta in self.ventas:\r\n if venta.total==MontoABuscar:\r\n buscar=True\r\n print(\"Existen coincidencias\")\r\n venta.Factura()\r\n if not buscar:\r\n print(\"No hay compras con ese monto\")\r\n","repo_name":"Msuso23/Proyecto","sub_path":"GestiondeVentas.py","file_name":"GestiondeVentas.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"24493788872","text":"\ndef __set__(setting):\n local_settings = 'travtogether_server.settings.local'\n production_settings = 'backend.settings.production'\n local_host = \"127.0.0.1\"\n production_host = \"travtogether-server.herokuapp.com\"\n if setting == \"local\":\n return local_settings, local_host\n elif setting == \"production\":\n return production_settings, production_host\n\n\nCURRENT_SETTING, CURRENT_HOST = __set__(\"local\")","repo_name":"yousebastian1618/TravTogether_Server","sub_path":"travtogether_server/current_settings.py","file_name":"current_settings.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"17364621831","text":"from unittest import TestCase, mock\nimport restaurantcli as rcli\nimport restaurantratings as rr\n\n\nclass CLITestCase(TestCase):\n \"\"\"Tests for our Restaurant CLI\"\"\"\n\n def setUp(self):\n self.ratings = rr.RestaurantRatings([\n rr.RestaurantRating(\"The Tavern\", 10),\n rr.RestaurantRating(\"Gastropub\", 7)\n ])\n\n def test_get_ratings(self):\n # Create mock method that returns ratings attr\n\n def mock_get_ratings_from_file(filename):\n return self.ratings\n\n with mock.patch(\n \"restaurantratings.RestaurantRatings.get_ratings_from_file\",\n mock_get_ratings_from_file) as m:\n\n ratings = rr.RestaurantRatings.get_ratings_from_file(\"mockfile\")\n\n self.assertEqual(len(ratings.ratings), 2)\n self.assertEqual(ratings.ratings[0].name, \"The Tavern\")\n self.assertEqual(ratings.ratings[0].rating, 10)\n self.assertEqual(ratings.ratings[1].name, \"Gastropub\")\n self.assertEqual(ratings.ratings[1].rating, 7)\n\n\nclass RestaurantRatingTests(TestCase):\n \"\"\"Tests for individual restaurant rating objects\"\"\"\n\n def test_init(self):\n rating = rr.RestaurantRating(\"The Tavern\", 10)\n self.assertEqual(rating.name, \"The Tavern\")\n self.assertEqual(rating.rating, 10)\n\n def test_update_rating(self):\n rating = rr.RestaurantRating(\"The Tavern\", 10)\n rating.update_rating(2)\n self.assertEqual(rating.rating, 2)\n\n def test_eq_true(self):\n rating_1 = rr.RestaurantRating(\"The Tavern\", 10)\n rating_2 = rr.RestaurantRating(\"Gastropub\", 10)\n self.assertEqual(rating_1, rating_2)\n\n def test_eq_false(self):\n rating_1 = rr.RestaurantRating(\"The Tavern\", 10)\n rating_2 = rr.RestaurantRating(\"Gastropub\", 9)\n self.assertNotEqual(rating_1, rating_2)\n\n def test_lt_true(self):\n rating_1 = rr.RestaurantRating(\"The Tavern\", 9)\n rating_2 = rr.RestaurantRating(\"Gastropub\", 10)\n self.assertLess(rating_1, rating_2)\n\n def test_lt_false(self):\n rating_1 = rr.RestaurantRating(\"The Tavern\", 10)\n rating_2 = rr.RestaurantRating(\"Gastropub\", 9)\n self.assertFalse(rating_1 < rating_2)\n\n\nclass RestaurantRatingsTests(TestCase):\n \"\"\"Tests for the RestaurantRatingsTests umbrella objects\"\"\"\n\n def setUp(self):\n \"\"\"Set up sub-objects for RestaurantRatings tests\"\"\"\n self.rrobj = rr.RestaurantRatings([\n rr.RestaurantRating(\"The Tavern\", 10),\n rr.RestaurantRating(\"Gastropub\", 9),\n rr.RestaurantRating(\"Snack Shack\", 5)\n ])\n\n def test_init(self):\n rrobj = rr.RestaurantRatings()\n self.assertEqual(len(rrobj.ratings), 0)\n self.assertEqual(rrobj.ratings, [])\n\n def test_init_with_source(self):\n rrobj = rr.RestaurantRatings([rr.RestaurantRating(\"The Tavern\", 10)])\n self.assertEqual(len(rrobj.ratings), 1)\n self.assertEqual(rrobj.ratings[0].name, \"The Tavern\")\n self.assertEqual(rrobj.ratings[0].rating, 10)\n\n def test_add_rating(self):\n self.rrobj.add_rating(\"Kimberly's\", 5)\n self.assertEqual(len(self.rrobj.ratings), 4)\n self.assertEqual(self.rrobj.ratings[3].name, \"Kimberly's\")\n self.assertEqual(self.rrobj.ratings[3].rating, 5)\n\n def test_get_rating_by_name(self):\n restaurant_rating = self.rrobj.get_rating_by_name(\"The Tavern\")\n self.assertEqual(restaurant_rating.name, \"The Tavern\")\n self.assertEqual(restaurant_rating.rating, 10)\n\n def test_remove_rating_by_name(self):\n self.rrobj.remove_rating_by_name(\"The Tavern\")\n self.assertEqual(len(self.rrobj.ratings), 2)\n self.assertEqual(self.rrobj.ratings[0].name, \"Gastropub\")\n self.assertEqual(self.rrobj.ratings[1].name, \"Snack Shack\")\n\n def 
test_remove_rating_by_index(self):\n self.rrobj.remove_rating_by_index(1)\n self.assertEqual(len(self.rrobj.ratings), 2)\n self.assertEqual(self.rrobj.ratings[1].name, \"Snack Shack\")\n self.assertEqual(self.rrobj.ratings[1].rating, 5)\n\n def test_get_rating_by_name_error(self):\n # renamed with the test_ prefix so the runner actually picks it up;\n # NoSuchRestaurantError is assumed to be defined in restaurantratings\n with self.assertRaises(rr.NoSuchRestaurantError):\n self.rrobj.get_rating_by_name(\"Not a Restaurant\")\n\n\nclass MockedFileRestaurantRatingsTests(TestCase):\n \"\"\"Mock our save to file test case\"\"\"\n\n def test_save_to_file(self):\n mockobj = mock.mock_open()\n rrobj = rr.RestaurantRatings([\n rr.RestaurantRating(\"The Tavern\", 10)\n ])\n\n with mock.patch(\"builtins.open\", mockobj) as mock_f:\n rrobj.save_to_file(\"mockfile\")\n\n mockobj.assert_called_once_with(\"mockfile\", \"w\")\n mockobj().write.assert_has_calls([\n mock.call(\"The Tavern\"),\n mock.call(\":\"),\n mock.call(\"10\"),\n ])\n\n\nif __name__ == \"__main__\":\n\n import unittest\n\n unittest.main()\n","repo_name":"nykimberly/playground-python","sub_path":"hb/w2/d6_weekend-review/restaurant-cli/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"14781968895","text":"#Добавление вложений в confluence, а также обновление метаданных\n\n# -*- coding: utf-8 -*-\nfrom __future__ import with_statement\nimport sys, string, re, os\nimport pyodbc\nimport xmlrpc.client\nimport codecs\n\nURL ='' \nusername = '' #логин\npwd = '' #пароль\nclient = xmlrpc.client.ServerProxy(URL+'/rpc/xmlrpc') #API.XMLRPC\nauthToken = client.confluence2.login(username,pwd) #API.авторизация\n\nPAGE_PRN_ID = '885102819'\n\nsql_5_0 = \"SELECT FILE_PATH,PAGE_PRN_ID, FILE_EXT,FULL_NAME FROM DEV_DB_STG.S_FILE_POWER_DESIGNER where FILE_EXT in ('png','gif','css','js') order by FILE_NAME, FILE_EXT\"\nprint(sql_5_0)\nconnect = pyodbc.connect('DSN=TD')\ncursor = connect.cursor()\ncursor.execute(sql_5_0)\nconnect.commit()\ntable_5 = cursor.fetchall()\nfor row in table_5:\n print(row[0],row[2])\n if row[2] == 'gif':\n contentType = 'image/gif'\n elif row[2] == 'png':\n contentType = 'image/png'\n elif row[2] == 'css':\n contentType = 'text/css'\n elif row[2] == 'js':\n contentType = 'application/x-javascript'\n path = row[0]\n f = open(path,'rb')\n data = f.read()\n filename = row[3]\n page_id = row[1]\n page = client.confluence2.getPage(authToken, str(page_id))\n client.confluence2.removeAttachment(authToken, str(page_id),str(filename))\n print('file remove')\n if page is None:\n exit(\"Could not find page \" + spacekey + \":\" + str(page['title']))\n attachment = {}\n attachment['fileName'] =os.path.basename(filename)\n attachment['contentType'] = contentType\n print(page['id'])\n client.confluence2.addAttachment(authToken, page['id'], attachment, xmlrpc.client.Binary(data))\nf.close\ncursor.close\nconnect.close()\n#-----------------------------------------------------------------------------------------------------\npage_attachment = '885102819'\ntext = client.confluence2.getAttachments(authToken,page_attachment)\nconnect = pyodbc.connect('DSN=TD')\ncursor = connect.cursor()\nfor i in range(len(text)):\n sql_6_1 = \"UPDATE DEV_DB_STG.S_FILE_POWER_DESIGNER set URL ='\"+str(text[i]['url']) + u\"' where FULL_NAME = '\" + str(text[i]['fileName'])+u\"';\"\n cursor.execute(sql_6_1)\n connect.commit()\n i = i + 1\ncursor.close\nconnect.close()\n","repo_name":"Testudinate/Confluence","sub_path":"05_addAttachment_getAttachments.py","file_name":"05_addAttachment_getAttachments.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"17239598628","text":"# Combining weak to strong learners via random forests\n'''\nIntuitively, a random forest can be considered as an ensemble of decision\ntrees. The idea behind the ensemble learning is to combine weak learners\nto build a more robust model, a strong learner, that has a better generalization\nerror and is less susceptible to overfitting.\n\n'''\n# Train a model to classify the different flowers in our Iris dataset\nfrom sklearn import datasets\nimport numpy as np\n\niris = datasets.load_iris()\nX = iris.data[:, [2, 3]]\ny = iris.target\n\nfrom sklearn.cross_validation import train_test_split\n\n# random_state : int or RandomState\n# Pseudo-random number generator state used for random sampling.\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\nX_combined = np.vstack((X_train, X_train))\ny_combined = np.hstack((y_train, y_test))\n\nfrom matplotlib.colors import ListedColormap\nimport matplotlib.pyplot as plt\n\n\ndef plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n # setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n # plot all samples\n X_test, y_test = X[test_idx, :], y[test_idx]\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)\n\n # highlight test samples\n if test_idx:\n X_test, y_test = X[test_idx, :], y[test_idx]\n plt.scatter(X_test[:, 0], X_test[:, 1], c='', alpha=1.0, linewidth=1, marker='o', s=55, label='test set')\n\n\n'''\nWe do not have to construct the random forest classifier from individual decision trees\nby ourselves; there is already an implementation in scikit-learn that we can use:\n''' \nfrom sklearn.ensemble import RandomForestClassifier\nforest = RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1, n_jobs=2)\nforest.fit(X_train, y_train)\nplot_decision_regions(X_combined, y_combined, classifier=forest, test_idx=range(105, 150))\nplt.xlabel('petal length')\nplt.ylabel('petal width')\nplt.legend(loc='upper left')\nplt.show()\n\n'''\nUsing the preceding code, we trained a random forest from 10 decision trees via the\nn_estimators parameter and used the entropy criterion as an impurity measure to\nsplit the nodes. Although we are growing a very small random forest from a very\nsmall training dataset, we used the n_jobs parameter for demonstration purposes,\nwhich allows us to parallelize the model training using multiple cores of our\ncomputer (here, two).\n\n'''\n","repo_name":"wei-Z/Python-Machine-Learning","sub_path":"self_practice/Chapter 3 Random Forest.py","file_name":"Chapter 3 Random Forest.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"24760604673","text":"#1. В единственной строке записан текст. Для каждого слова из данного текста подсчитайте,\n# сколько раз оно встречалось в этом тексте. Задачу необходимо решить с использованием словаря.\n\ns = input('Enter the string: ')\ns = s.split()\nd = dict()\nfor k in s:\n if k in d:\n d[k] += 1\n else:\n d[k] = 1\nfor k in d:\n print(k,':', d[k])","repo_name":"podloznyi/Studying_Python","sub_path":"HomeWork7/ThirdTask.py","file_name":"ThirdTask.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"28018261845","text":"from datetime import datetime\nimport logging\n\nfrom paste.deploy.converters import asbool, asint\nfrom pylons import request, response, tmpl_context as c, config\nfrom pylons.controllers.util import redirect\nfrom pylons.decorators import validate\nfrom sqlalchemy import not_\n\nfrom adhocracy import model\nfrom adhocracy.controllers.event import EventController\nfrom adhocracy.lib import helpers as h\nfrom adhocracy.lib import pager, sorting\nfrom adhocracy.lib.auth import require\nfrom adhocracy.lib.base import BaseController\nfrom adhocracy.lib.staticpage import get_static_page\nfrom adhocracy.lib.templating import render\nfrom adhocracy.lib.util import get_entity_or_abort\n\nfrom proposal import ProposalFilterForm\n\n\nlog = logging.getLogger(__name__)\n\n\nclass RootController(BaseController):\n\n @validate(schema=ProposalFilterForm(), post_only=False, on_get=True)\n def index(self, format='html'):\n require.proposal.index()\n if c.instance:\n redirect(h.entity_url(c.instance))\n\n instances_in_root = asint(\n config.get('adhocracy.startpage.instances.list_length', 0))\n if instances_in_root > 0:\n c.instances = model.Instance.all(limit=instances_in_root)\n elif instances_in_root == -1:\n c.instances = model.Instance.all()\n\n c.page = get_static_page('index')\n\n proposals_number = asint(\n config.get('adhocracy.startpage.proposals.list_length', 0))\n\n if proposals_number > 0:\n proposals = model.Proposal.all_q()\\\n .join(model.Instance).filter(not_(\n model.Instance.key.in_(model.Instance.SPECIAL_KEYS)))\\\n .order_by(model.Proposal.create_time.desc())\n\n c.new_proposals_pager = pager.proposals(\n proposals, size=proposals_number,\n default_sort=sorting.entity_newest,\n enable_pages=False,\n enable_sorts=False)\n else:\n c.new_proposals_pager = None\n\n if asbool(config.get('adhocracy.show_stats_on_frontpage', 'true')):\n c.stats_global = {\n \"members\": model.User.all_q().count(),\n \"comments\": model.Comment.all_q().count(),\n \"proposals\": model.Proposal.all_q().count(),\n \"votes\": model.Vote.all_q().count(),\n }\n\n if format == 'rss':\n return EventController().all(format='rss')\n\n return render('index.html')\n\n #@RequireInstance\n def dispatch_delegateable(self, id):\n dgb = get_entity_or_abort(model.Delegateable, id,\n instance_filter=False)\n redirect(h.entity_url(dgb))\n\n def sitemap_xml(self):\n if c.instance:\n redirect(h.base_url('/sitemap.xml', None))\n c.delegateables = model.Delegateable.all()\n c.change_time = datetime.utcnow()\n response.content_type = \"text/xml\"\n return render(\"sitemap.xml\")\n\n def robots_txt(self):\n response.content_type = \"text/plain\"\n if not c.instance:\n return render(\"robots.txt\")\n return render(\"instance/robots.txt\")\n\n def tutorials(self):\n if 'disable' in request.params:\n name = request.params.get('disable')\n if name == 'ALL':\n h.tutorial.disable(None)\n else:\n h.tutorial.disable(name)\n else:\n h.tutorial.enable()\n","repo_name":"whausen/part","sub_path":"src/adhocracy/controllers/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"25090492824","text":"from __future__ import print_function, division\nimport sys\nimport os\nimport bz2\nimport json\nimport cPickle as pickle\nimport numpy as np\nfrom scipy import optimize\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Hawkes Intensity Process model in Python\n\n\ndef get_C(k, alpha=2.016, beta=0.1):\n \"\"\"\n Get parameter capital C.\n :param k: scaling factor for video quality\n :param alpha: power-law exponent of user influence distribution\n :param beta: user influence component\n :return: parameter capital C\n \"\"\"\n return k*(alpha-1)/(alpha-beta-1)\n\n\ndef rand_initialize_weights(n):\n \"\"\"\n Initialize multiple sets of random weights for theta.\n :param n: number of sets of random weights\n :return: n sets of random vectors, in the order of mu, theta, C, c, gamma, eta\n \"\"\"\n ret = []\n for _ in xrange(n):\n rand_mu = np.random.uniform(0, 505.90)\n rand_theta = np.random.uniform(2.3, 67.7)\n rand_C = get_C(np.random.uniform(0, 52.9))\n rand_c = np.random.uniform(0, 4)\n rand_gamma = np.random.uniform(0, 9947)\n rand_eta = np.random.uniform(0, 289.2)\n ret.append(np.array([rand_mu, rand_theta, rand_C, rand_c, rand_gamma, rand_eta]))\n return ret\n\n\ndef time_decay(i, c):\n \"\"\"\n Time decay part for series (tau + c).\n :param i: tau value\n :param c: c value\n :return: abbreviated presentation\n \"\"\"\n return np.arange(1, i+1)[::-1]+c\n\n\ndef predict(params, x):\n \"\"\"\n Predict viewcount with sharecount sequence x.\n Comments are for vector operation style\n :param params: model parameters, mu, theta, C, c, gamma, eta\n :param x: observed sharecount sequence from beginning\n :return: predict value\n \"\"\"\n mu, theta, C, c, gamma, eta = params\n n = len(x)\n x_predict = np.zeros(len(x))\n for i in xrange(n):\n if i == 0:\n x_predict[0] = gamma + mu*x[0]\n else:\n x_predict[i] = eta + mu*x[i] + C*np.sum(x_predict[:i]*(time_decay(i, c)**(-1-theta)))\n return x_predict\n\n\ndef cost_function(params, x, y, num_split=None):\n \"\"\"\n Non-regularized cost function for HIP model\n :param params: model parameters, mu, theta, C, c, gamma, eta\n :param x: observed sharecount\n :param y: observed viewcount\n :param num_split: number of test set\n :return: cost function value\n \"\"\"\n view_predict = predict(params, x)\n cost_vector = view_predict - y\n if num_split is not None:\n cost_vector = cost_vector[-num_split:]\n cost = np.sum(cost_vector ** 2) / 2\n return cost/len(cost_vector)\n\n\ndef grad_descent(params, x, y):\n \"\"\"\n Non-regularized gradient function for HIP model\n :param params: model parameters, mu, theta, C, c, gamma, eta\n :param x: observed sharecount\n :param y: observed viewcount\n :return: cost function value\n \"\"\"\n mu, theta, C, c, gamma, eta = params\n view_predict = predict(params, x)\n n = len(x)\n # partial derivative for mu\n grad_mu_vector = np.zeros(n)\n grad_mu_vector[0] = x[0]\n for i in xrange(1, n):\n grad_mu_vector[i] = x[i] + C*np.sum(grad_mu_vector[:i] * (time_decay(i, c)**(-1-theta)))\n grad_mu = np.sum((view_predict-y)*grad_mu_vector)\n # partial derivative for theta\n grad_theta_vector = np.zeros(n)\n grad_theta_vector[0] = 0\n for i in xrange(1, n):\n grad_theta_vector[i] = C*np.sum((grad_theta_vector[:i]-view_predict[:i]*np.log(time_decay(i, c))) * (time_decay(i, c)**(-1-theta)))\n grad_theta = np.sum((view_predict-y)*grad_theta_vector)\n # partial derivative for C\n grad_C_vector = np.zeros(n)\n grad_C_vector[0] = 0\n for i in xrange(1, n):\n 
grad_C_vector[i] = np.sum((C*grad_C_vector[:i]+view_predict[:i]) * (time_decay(i, c)**(-1-theta)))\n grad_C = np.sum((view_predict-y)*grad_C_vector)\n # partial derivative for c\n grad_c_vector = np.zeros(n)\n grad_c_vector[0] = 0\n for i in xrange(1, n):\n grad_c_vector[i] = C*np.sum((grad_c_vector[:i]-(1+theta)*view_predict[:i]/time_decay(i, c)) * (time_decay(i, c)**(-1-theta)))\n grad_c = np.sum((view_predict-y)*grad_c_vector)\n # partial derivative for gamma\n grad_gamma_vector = np.zeros(n)\n grad_gamma_vector[0] = 1\n for i in xrange(1, n):\n grad_gamma_vector[i] = C*np.sum(grad_gamma_vector[:i] * (time_decay(i, c)**(-1-theta)))\n grad_gamma = np.sum((view_predict-y)*grad_gamma_vector)\n # partial derivative for eta\n grad_eta_vector = np.zeros(n)\n grad_eta_vector[0] = 0\n for i in xrange(1, n):\n grad_eta_vector[i] = 1 + C*np.sum(grad_eta_vector[:i] * (time_decay(i, c)**(-1-theta)))\n grad_eta = np.sum((view_predict-y)*grad_eta_vector)\n return np.array([grad_mu, grad_theta, grad_C, grad_c, grad_gamma, grad_eta])/n\n\n\ndef train_process(x_train, y_train, initial_weights_sets):\n \"\"\"\n Train HIP with BFGS optimization tool\n :param x_train: train sharecount\n :param y_train: train viewcount\n :param initial_weights_sets: sets of random initial weights\n :return: best optimization parameters\n \"\"\"\n best_params = None\n best_cost = np.inf\n\n for init_idx, initial_weight in enumerate(initial_weights_sets):\n # perform non-regularized optimization with l-bfgs\n optimizer = optimize.minimize(cost_function, initial_weight, jac=grad_descent, method='L-BFGS-B',\n args=(x_train, y_train), bounds=bounds)\n if optimizer.fun < best_cost:\n best_cost = optimizer.fun\n best_params = optimizer.x\n\n return best_params\n\n\ndef plot_func(params, x, y, title, idx):\n \"\"\"\n Plot trend from R-HIP, PY-HIP and AUTO-HIP parameters\n :param params: model parameters, mu, theta, C, c, gamma, eta\n :param x: observed sharecount\n :param y: observed viewcount\n :param title: figure title, YoutubeID\n :param idx: subplot index\n :return:\n \"\"\"\n # visualise sample data\n ax1 = fig.add_subplot(121+idx)\n # ax1 = fig.add_subplot(111)\n ax2 = ax1.twinx()\n ax1.plot(np.arange(1, age+1), y, 'k--', label='observed #views')\n ax2.plot(np.arange(1, age+1), x, 'r-', label='#share')\n ax1.plot((num_train, num_train), (ax1.get_ylim()[0], ax1.get_ylim()[1]), 'k--')\n\n ax1.set_ylim(ymin=max(0, ax1.get_ylim()[0]))\n ax2.set_ylim(ymax=3*max(x))\n ax1.set_xlabel('video age (day)')\n ax1.set_ylabel('Number of views', color='k')\n ax1.tick_params('y', colors='k')\n ax2.set_ylabel('Number of shares', color='r')\n ax2.tick_params('y', colors='r')\n\n mu, theta, C, c, gamma, eta = params\n ax2.text(0.03, 0.85, '$\\mu$={0:.2f}, $\\\\theta$={1:.2f}\\nC={2:.2f}, c={3:.2f}\\n$\\gamma$={4:.2f}, $\\eta$={5:.2f}'\n .format(mu, theta, C, c, gamma, eta), transform=ax1.transAxes)\n ax1.set_title(title)\n\n predidt_x = predict(params, x)\n ax1.plot(np.arange(1, num_train+1), predidt_x[:num_train], 'b-', label='HIP fit')\n ax1.plot(np.arange(num_train+1, age+1), predidt_x[num_train:age], 'm-', label='HIP forecast')\n\n\nif __name__ == '__main__':\n # == == == == == == == == Part 1: Load ACTIVE dataset == == == == == == == == #\n # First time it gets loaded from the JSON format and writes essential fields into a pickle binary file.\n # check if the binary exists\n if not os.path.exists('../data/active-dataset.p'):\n print('--> Converting ACTIVE dataset from JSON format to pickle... 
might take a while!')\n test_cases = {}\n with bz2.BZ2File('../data/active-dataset.json.bz2') as f:\n dataset = json.loads(f.readline())\n for video in dataset:\n test_cases[video['YoutubeID']] = (video['numShare'], video['dailyViewcount'])\n pickle.dump(test_cases, open('../data/active-dataset.p', 'wb'))\n\n print('--> Loading the ACTIVE dataset from pickle...')\n test_cases = pickle.load(open('../data/active-dataset.p', 'rb'))\n # select 2 videos from paper\n test_vids = ['bUORBT9iFKc', 'cG0nQTYd8ck']\n # or random select 2 videos\n # test_videos = np.array(test_cases.keys())\n # random_index = np.random.randint(0, len(test_videos), 2)\n # test_vids = test_videos[random_index]\n\n # == == == == == == == == Part 2: Set up experiment parameters == == == == == == == == #\n # setting parameters\n fig = plt.figure(figsize=(14, 5))\n age = 120\n num_train = 90\n num_test = 30\n k = 5\n bounds = [(0, None), (0, None), (0, None), (None, None), (0, None), (0, None)]\n\n for tc_idx, vid in enumerate(test_vids):\n print('fitting and forecasting for video: {0}'.format(vid))\n dailyshare, dailyview = test_cases[vid]\n dailyshare = dailyshare[:age]\n dailyview = dailyview[:age]\n\n x_train = dailyshare[: num_train]\n y_train = dailyview[: num_train]\n\n # initialize weights\n # k sets of random params\n initial_weights_sets = rand_initialize_weights(k)\n\n # == == == == == == == == Part 3: Train with closed form gradient == == == == == == == == #\n best_fitted_params = train_process(x_train, y_train, initial_weights_sets)\n\n # == == == == == == == == Part 4: Plot fitting and forecast result == == == == == == == == #\n plot_func(best_fitted_params, dailyshare, dailyview, vid, tc_idx)\n\n plt.tight_layout()\n plt.show()\n","repo_name":"zhangleihan/hip-popularity","sub_path":"pyhip/pyhip.py","file_name":"pyhip.py","file_ext":"py","file_size_in_byte":9292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"}
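The pyhip record above implements a closed-form gradient (grad_descent) for the HIP cost function; scipy can sanity-check such a derivation against finite differences. A sketch, assuming cost_function, grad_descent, rand_initialize_weights and the x_train/y_train arrays from the record are in scope:

# Compare the analytic HIP gradient against a finite-difference estimate.
from scipy.optimize import check_grad

w0 = rand_initialize_weights(1)[0]   # one random starting point
err = check_grad(cost_function, grad_descent, w0, x_train, y_train)
print('gradient check error:', err)  # should be small relative to the gradient norm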
+{"seq_id":"72897155113","text":"# -*- coding: utf-8 -*-\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n#\n# Author: Mauro Soria\n\nfrom optparse import OptionParser, OptionGroup\n\nfrom ...lib.utils.FileUtils import File\nfrom ...lib.utils.FileUtils import FileUtils\nfrom ...lib.utils.DefaultConfigParser import DefaultConfigParser\nfrom ...thirdparty.oset import oset\n\n\nclass ArgumentParser(object):\n def __init__(self, url, wordlist=\"./black/workers/dirsearch/dirsearch_ext/db/dicc.txt\", extensions=None, http_proxy=None, headers=None, user_agent=None, \n user_random_agents=None, cookie=None, threads_count=10, exclude_status_codes=None, path=\"/\",\n force_extensions=False, delay=0, timeout=1, ip_address=None, recursive=False, redirect=False, **kwargs):\n self.script_path = None\n\n self.url = url\n if extensions is None:\n print('No extension specified. You must specify at least one extension')\n exit(0)\n with File(wordlist) as file_wordlist:\n if not file_wordlist.exists():\n print('The wordlist file does not exist')\n exit(0)\n if not file_wordlist.isValid():\n print('The wordlist is invalid')\n exit(0)\n if not file_wordlist.canRead():\n print('The wordlist cannot be read')\n exit(0)\n if http_proxy is not None:\n if http_proxy.startswith('http://'):\n self.proxy = http_proxy\n else:\n self.proxy = 'http://{0}'.format(http_proxy)\n else:\n self.proxy = None\n if headers is not None:\n try:\n self.headers = dict((key.strip(), value.strip()) for (key, value) in (header.split(':', 1)\n for header in headers))\n except Exception as _:\n print('Invalid headers')\n exit(0)\n else:\n self.headers = {}\n\n self.extensions = list(oset([extension.strip() for extension in extensions.split(',')]))\n self.user_agent = user_agent\n self.user_random_agents = user_random_agents\n self.cookie = cookie\n if threads_count < 1:\n print('Threads number must be a number greater than zero')\n exit(0)\n self.threads_count = threads_count\n if exclude_status_codes is not None:\n try:\n self.exclude_status_codes = list(\n oset([int(exclude_status_code.strip()) if exclude_status_code else None for exclude_status_code in\n exclude_status_codes.split(',')]))\n except ValueError:\n self.exclude_status_codes = []\n else:\n self.exclude_status_codes = []\n self.path = path\n self.wordlist = wordlist\n self.force_extensions = force_extensions\n\n self.delay = delay\n self.timeout = timeout\n self.ip_address = ip_address\n self.recursive = recursive\n\n # Well, here we have constants that were used in the original dirsearch,\n # BUT i am too lazy to remove them totally. 
Moreover, we will probably need them in future\n self.max_retries = 3\n\n self.json_output_file = \"./output\"\n\n self.scan_subdirs = None\n self.exclude_subdirs = None\n\n self.redirect = redirect\n self.request_by_name = True\n\n self.lowercase = False\n\n self.use_random_agents = False\n self.test_fail_path = \"\"\n","repo_name":"c0rv4x/project-black","sub_path":"black/workers/dirsearch/dirsearch_ext/lib/core/ArgumentParser.py","file_name":"ArgumentParser.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":286,"dataset":"github-code","pt":"72"}
+{"seq_id":"833022372","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport discord\nimport aiohttp\nimport asyncio\n\nfrom discord.ext import commands\n\n\nclass Bigemoji:\n def __init__(self, client):\n self.client = client\n self.session = aiohttp.ClientSession()\n\n\n print(\"Loading Bigemoji...\")\n\n async def on_message(self, message):\n # Message author variables\n user_id = message.author.id\n user_name = message.author\n\n\ndef setup(client):\n client.add_cog(Bigemoji(client))\n","repo_name":"Mehvix/synapsBot","sub_path":"bigemoji.py","file_name":"bigemoji.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15452895119","text":"\n\nnumbers = []\nfor x in range (1000, 3001):\n split = [int(d) for d in str(x)]\n odd = False\n for y in range (0, len(split)):\n if split[y] % 2 != 0:\n odd = True\n if (odd == False):\n numbers.append(x)\nprint(numbers)","repo_name":"ayusharoraa/python-code","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16227461783","text":"# -*- encoding:utf-8 -*-\nimport pandas as pd\n# 数据查看\n# 常用reas_csv 常用参数说明 https://www.jianshu.com/p/366aa5daaba9\ndata = pd.read_csv(\"8.Regression/8.Advertising.csv\")\ndata.head()\ndata.info()\n\n\n# 数据处理\n# 缺失值处理\n# 标准化 归一化\n# 编码化 pca降纬\n# 特征提取 文本特征 feature_select文件\n\n\n\n\n\n# 网格搜索\n\n\n\n\n\n\n","repo_name":"keepingoner/ml","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30579913840","text":"import pygame\nfrom Clases import Bullet\nimport os\n\nclass SpaceShip(pygame.sprite.Sprite):\n\tdef __init__(self, WIDTH, HEIGHT):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.SPSHIMAGE = pygame.image.load(os.path.join('Images', 'nave.jpg'))\n\t\tself.EXPIMAGE = pygame.image.load(os.path.join('Images', 'explosion.jpg'))\n\t\t\n\t\tself.RECT = self.SPSHIMAGE.get_rect()\n\t\tself.RECT.centerx = WIDTH/2\n\t\tself.RECT.centery = HEIGHT - 30\n\n\t\tself.SHOOTLIST = []\n\t\tself.HEALTH = True \n\n\t\tself.VEL = 20\n\n\t\tself.SOUND_SHOOT = pygame.mixer.Sound(\"Sounds/shoot.wav\")\n\t\tself.SOUND_EXP = pygame.mixer.Sound(\"Sounds/gameover.wav\")\n\n\n\tdef MovementRight(self):\n\t\tself.RECT.right += self.VEL\n\t\tself.__movement()\n\n\tdef MovementLeft(self):\n\t\tself.RECT.left -= self.VEL\n\t\tself.__movement()\n\n\tdef __movement(self):\n\t\tif self.HEALTH == True:\n\t\t\tif self.RECT.left <= 0:\n\t\t\t\tself.RECT.left = 0\n\t\t\telif self.RECT.left > 870:\n\t\t\t\tself.RECT.left = 840\n\n\tdef Shoot(self, x, y):\n\t\tMY_BULLET = Bullet.Bullet(x,y,\"Images/disparoa.jpg\",True)\n\t\tself.SHOOTLIST.append(MY_BULLET)\n\t\tself.SOUND_SHOOT.play()\n\n\tdef Destruction(self):\n\t\tself.SOUND_EXP.play()\n\t\tself.HEALTH = False\n\t\tself.VEL = 0\n\t\tself.SPSHIMAGE = self.EXPIMAGE\n\n\tdef Draw(self, WIN):\n\t\tWIN.blit(self.SPSHIMAGE, self.RECT)","repo_name":"LauRivero150920/PygameTutorial","sub_path":"Pygame1/Clases/SpaceShip.py","file_name":"SpaceShip.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36716979280","text":"import streamlit as st\n\nfrom src import home, cleans, people, money, supports\n\ndef init():\n st.session_state.pages = {\n '🏠 Trang chủ': home.main,\n '🧹 Lịch Dọn dẹp': cleans.main,\n '🙍 Quản lý nhân sự': people.main,\n '💰 Quản lý tiền bạc': money.main,\n '🤖 Hỗ trợ': supports.main\n }\n\ndef draw_style():\n st.set_page_config(page_title = 'Quản lý nhà chung',\n page_icon = '🏠',\n layout = 'wide',\n menu_items = {\n 'Get help': 'https://www.facebook.com/chienlady/',\n 'Report a Bug': 'https://www.facebook.com/chienlady/',\n 'About': 'Trang web có mục đích riêng rư **phi lợi nhuận**.'\n })\n\n style = '''\n \n '''\n st.markdown(style, unsafe_allow_html = True)\n\ndef load_page(page_name):\n st.session_state.pages[page_name]()\n\ndef main():\n init()\n draw_style()\n with st.sidebar:\n st.markdown('# Menu quản lý trong nhà')\n st.image('https://media.giphy.com/media/cYxRo3zzej4vTAcd4r/giphy.gif')\n page = st.selectbox('Chọn đích đến',\n ('🏠 Trang chủ',\n '🧹 Lịch Dọn dẹp',\n '🙍 Quản lý nhân sự',\n '💰 Quản lý tiền bạc',\n '🤖 Hỗ trợ'),\n key = 'choose_page')\n load_page(page)\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"ChienLady/st-web","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10482240579","text":"from __future__ import print_function\nimport os\nimport sys\nimport re\nimport codecs\n\n\ndef processFile(filepath):\n fp = codecs.open(filepath, 'rU', 'iso-8859-2')\n\n content = fp.read()\n \n totalNumberOfEmails = 0\n totalNumberOfDates = 0\n totalNumberOfFloats = 0\n totalNumberOfIntegers = 0\n totalNumberOfAbbreviations = 0\n totalNumberOfSentences = 0 \n authorName = \"\"\n keywords = \"\"\n \n for emailResult in re.finditer(r'[A-Z0-9a-z._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}', content):\n #print(emailResult.group())\n totalNumberOfEmails += 1\n\n for text in re.finditer(r'(.*?)<\\/P>', content, re.M | re.DOTALL):\n #rrrr-dd-mm or rrrr/dd/mm or rrrr.dd.mm\n for dateResult in re.finditer(r'([12]\\d{3}[-./](0[1-9]|[12]\\d|3[01])[-./](0[1-9]|1[0-2]))', text.group(1)):\n #print(dateResult.group())\n totalNumberOfDates += 1\n #dd-mm-rrrr or dd/mm/rrrr or dd.mm.rrrr\n for dateResult in re.finditer(r'((0[1-9]|[12]\\d|3[01])[-./](0[1-9]|1[0-2])[-./][12]\\d{3})', text.group(1)):\n #print(dateResult.group())\n totalNumberOfDates += 1\n for floatResult in re.finditer(r'[-+]?[0-9]*\\.[0-9]+([eE][-+]?[0-9]+)?', text.group(1), re.DOTALL):\n #print(floatResult.group())\n totalNumberOfFloats += 1\n for abbreviationResult in re.finditer(r'\\s([A-Za-z]{1,3}[.])', text.group(1)):\n #print(abbreviationResult.group())\n totalNumberOfAbbreviations += 1\n for integerResult in re.finditer(r'([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])', text.group(1)):\n #print(integerResult.group())\n totalNumberOfIntegers += 1\n for sentencesResult in re.finditer(r'([A-Z][^\\.!?]*[\\.!?])', text.group(1), re.DOTALL | re.M):\n #print(sentencesResult.group())\n totalNumberOfSentences += 1\n\n\n authorResult = re.search(r'', content)\n if authorResult:\n #print(authorResult.group(1))\n authorName = authorResult.group(1)\n \n for keywordResult in re.finditer(r' 0:\n n_racks = order(clothes, rack_capacity)\n\nprint(n_racks)\n","repo_name":"h-dmt/Python_Advanced","sub_path":"1_stacks_queues/Fashion_boutique.py","file_name":"Fashion_boutique.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17848450500","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ### Accessing Azure Data Lake using Access Keys ###\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_data_source\", \"\")\nv_data_source = dbutils.widgets.get(\"p_data_source\")\n\n# COMMAND ----------\n\n# MAGIC %run \"./includes/configuration\"\n\n# COMMAND ----------\n\n# MAGIC %run \"./includes/common_functions\"\n\n# COMMAND ----------\n\nhs_account_key = dbutils.secrets.get(scope = 'hs-scopesecret-hs', key = 'storageaccountkey')\n\n# COMMAND ----------\n\nspark.conf.set(\n \"fs.azure.account.key.learningdatabrickstorage.dfs.core.windows.net\",\n hs_account_key\n)\n\n# COMMAND ----------\n\ndisplay(dbutils.fs.ls(f\"{raw_folder_path}\"))\n\n# COMMAND ----------\n\ndisplay(spark.read.csv(f\"{raw_folder_path}/pit_stops.json\"))\n\n# COMMAND ----------\n\nfrom pyspark.sql.types import StructType, StructField, IntegerType, StringType, DoubleType, FloatType, DateType\nfrom pyspark.sql.functions import col, current_timestamp, current_timestamp\n\n# COMMAND ----------\n\npitstops_schema = StructType(fields=[StructField(\"raceId\", IntegerType(), False),\n StructField(\"driverId\", IntegerType(), True),\n StructField(\"stop\", StringType(), True),\n StructField(\"lap\", IntegerType(), True),\n StructField(\"time\", StringType(), True),\n StructField(\"duration\", StringType(), True),\n StructField(\"milliseconds\", IntegerType(), True)\n])\n\n# COMMAND ----------\n\npitstops_df = spark.read \\\n .option(\"multiline\", True) \\\n .schema(pitstops_schema) \\\n .json(f\"{raw_folder_path}/pit_stops.json\")\n\n# COMMAND ----------\n\ndisplay(pitstops_df)\n\n# COMMAND ----------\n\nrenamed_pitstops_df = pitstops_df.withColumnRenamed(\"raceId\", \"race_id\") \\\n .withColumnRenamed(\"driverId\", \"driver_id\") \\\n .withColumn(\"ingestion_date\", current_timestamp())\n\n# COMMAND ----------\n\nrenamed_pitstops_df.printSchema()\n\n# COMMAND ----------\n\nrenamed_pitstops_df.schema.names\n\n# COMMAND ----------\n\nspark.conf.set(\"spark.sql.sources.partitionOverwriteMode\", \"dynamic\")\n\n# COMMAND ----------\n\nresults_dropped_df = results_dropped_df.select(\"result_id\", \"driver_id\", \"constructor_id\", \"number\", \"grid\", \"position\", \"position_text\",\n \"position_order\", \"points\", \"laps\", \"time\", \"milliseconds\", \"fastest_lap\", \"rank\", \"fastest_lap_time\",\n \"fastest_lap_speed\", \"file_date\", \"ingestion_date\", \"race_id\")\n\n# COMMAND ----------\n\n#renamed_pitstops_df.write.mode(\"overwrite\").parquet(f\"{processed_folder_path}/pit_stops\")\nrenamed_pitstops_df.write.mode(\"overwrite\").format(\"parquet\").saveAsTable(\"f1_processed.pit_stops\")\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM f1_processed.pit_stops;\n\n# COMMAND ----------\n\ndisplay(spark.read.parquet(f\"{processed_folder_path}/pit_stops\"))\n\n# COMMAND ----------\n\ndbutils.notebook.exit(\"Success\")\n","repo_name":"hserovsk/DataBricksAzure","sub_path":"ingestion/6.ingest_pit_stops.py","file_name":"6.ingest_pit_stops.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36441523455","text":"#!/usr/bin/env python3\n\nimport sys\nimport random\n\ndef cmdlinearg(name, default=None):\n for arg in sys.argv:\n if arg.startswith(name + \"=\"):\n return arg.split(\"=\")[1]\n assert default is not None, name\n return default\n\nseed = int(cmdlinearg('seed', sys.argv[-1]))\nrandom.seed(seed)\nL = int(cmdlinearg('len'))\n\nprint(''.join(random.choice(['S', 'N', 'B']) for _ in range(L)))\n","repo_name":"Kodsport/swedish-olympiad-2013","sub_path":"final/skolvagen/data/gen_random.py","file_name":"gen_random.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11483466652","text":"\"\"\"\nCPSC 5520, Seattle University\nAuthor: Leila Mirzaei\nCreated Date: Nov 18th 2022\n\nThis program implements a Chord (described here: https://pdos.csail.mit.edu/papers/chord:sigcomm01/chord_sigcomm.pdf)\nThe Chord software takes the form of a library to be linked with the client and server applications\nthat use it. The application interacts with Chord in two main ways:\n First, Chord provides a lookup(key) algorithm that yields the IP address of the node\n responsible for the key.\n\n Second, the Chord software on each node notifies the application of changes in the set of\n keys that the node is responsible for when a new node joins.\n\nThis program also allows a querier to talk to any arbitrary node in the network to query a\nvalue for a given key or add a key/value pair (with replacement).\n\n\nThe `chord_node.py` module takes a port number of an existing node (or 0 to indicate it should start a new network).\nThen, it joins a new node into the network using a system-assigned port number for itself.\nThe node joins and then listens for incoming connections (from other nodes or queriers).\nFor listening it uses a blocking TCP socket and pickle for the marshaling the messages.\n\"\"\"\nfrom datetime import datetime\n\nimport hashlib\nimport pickle\nimport socket\nimport sys\nimport threading\n\nM = 7 # Network have at most 128 possible nodes (M=7, nodes count = 2^^7 = 128).\nNODES = 2 ** M\nBUF_SZ = 4096 # socket recv arg\nBACKLOG = 100 # socket listen arg\nTEST_BASE = 43500\n\n\nclass ModRange(object):\n \"\"\"\n Range-like object that wraps around 0 at some divisor using modulo arithmetic.\n\n >>> mr = ModRange(1, 4, 100)\n >>> mr\n \n >>> 1 in mr and 2 in mr and 4 not in mr\n True\n >>> [i for i in mr]\n [1, 2, 3]\n >>> mr = ModRange(97, 2, 100)\n >>> 0 in mr and 99 in mr and 2 not in mr and 97 in mr\n True\n >>> [i for i in mr]\n [97, 98, 99, 0, 1]\n >>> [i for i in ModRange(0, 0, 5)]\n [0, 1, 2, 3, 4]\n \"\"\"\n\n def __init__(self, start, stop, divisor):\n self.divisor = divisor\n self.start = start % self.divisor\n self.stop = stop % self.divisor\n # we want to use ranges to make things speedy, but if it wraps around the 0 node, we have to use two\n if self.start < self.stop:\n self.intervals = (range(self.start, self.stop),)\n elif self.stop == 0:\n self.intervals = (range(self.start, self.divisor),)\n else:\n self.intervals = (range(self.start, self.divisor), range(0, self.stop))\n\n def __repr__(self):\n \"\"\" Something like the interval|node charts in the paper \"\"\"\n return ''.format(self.start, self.stop, self.divisor)\n\n def __contains__(self, id):\n \"\"\" Is the given id within this finger's interval? 
\"\"\"\n for interval in self.intervals:\n if id in interval:\n return True\n return False\n\n def __len__(self):\n total = 0\n for interval in self.intervals:\n total += len(interval)\n return total\n\n def __iter__(self):\n return ModRangeIter(self, 0, -1)\n\n\nclass ModRangeIter(object):\n \"\"\" Iterator class for ModRange \"\"\"\n\n def __init__(self, mr, i, j):\n self.mr, self.i, self.j = mr, i, j\n\n def __iter__(self):\n return ModRangeIter(self.mr, self.i, self.j)\n\n def __next__(self):\n if self.j == len(self.mr.intervals[self.i]) - 1:\n if self.i == len(self.mr.intervals) - 1:\n raise StopIteration()\n else:\n self.i += 1\n self.j = 0\n else:\n self.j += 1\n return self.mr.intervals[self.i][self.j]\n\n\nclass FingerEntry(object):\n \"\"\"\n Row in a finger table.\n\n >>> fe = FingerEntry(0, 1)\n >>> fe\n\n >>> fe.successor = 1\n >>> fe\n\n >>> 1 in fe, 2 in fe\n (True, False)\n >>> FingerEntry(0, 2, 3), FingerEntry(0, 3, 0)\n (, )\n >>> FingerEntry(3, 1, 0), FingerEntry(3, 2, 0), FingerEntry(3, 3, 0)\n (, , )\n >>> fe = FingerEntry(3, 3, 0)\n >>> 7 in fe and 0 in fe and 2 in fe and 3 not in fe\n True\n \"\"\"\n\n def __init__(self, n, k, node=None):\n if not (0 <= n < NODES and 0 < k <= M):\n raise ValueError('invalid finger entry values')\n self.start = (n + 2 ** (k - 1)) % NODES\n self.next_start = (n + 2 ** k) % NODES if k < M else n\n self.interval = ModRange(self.start, self.next_start, NODES)\n self.successor = node # This is the next active node. That is, what would\n # the node be if I wanted to store data in this interval?\n\n def __repr__(self):\n \"\"\" Something like the interval|node charts in the paper \"\"\"\n return ''.format(self.start, self.next_start, self.successor)\n\n def __contains__(self, id):\n \"\"\" Is the given id within this finger's interval? \"\"\"\n return id in self.interval\n\n\ndef sha1_hash(id_string):\n result = hashlib.sha1(id_string.encode())\n return int(result.hexdigest(), 16)\n\n\nclass ChordNode(object):\n def __init__(self, port_number):\n self.port_number = 0 if port_number > 0 else TEST_BASE\n self.if_first = True if port_number == 0 else False\n self.predecessor = None\n self.keys = {} # dictionary to store data\n self.identifier = None\n self.node = None\n self.node_socket = None\n threading.Thread(target=self.start_listening).start()\n self.finger_table = self.initialize_empty_finger_table()\n if self.if_first:\n self.join()\n else:\n endpoint_string = '127.0.0.0 ' + str(port_number)\n node_p = {'number': sha1_hash(endpoint_string) % 2 ** M, 'port': port_number}\n self.join(node_p)\n\n def start_listening(self):\n \"\"\"\n This function starts a listener socket for handling incoming RPC requests.\n It wait for accepting requests.\n When it receives a request, it starts a thread, runs an RPC handler function on the\n new thread, and then returns to listening mode.\n :return:\n \"\"\"\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as listener:\n self.node_socket = listener\n self.node_socket.bind(('localhost', self.port_number))\n self.node_socket.setblocking(True)\n self.node_socket.listen()\n self.port_number = self.node_socket.getsockname()[1]\n print('Node just started listening on port {}.'.format(self.port_number))\n while True:\n client, client_addr = self.node_socket.accept()\n threading.Thread(target=self.handle_rpc, args=(client,)).start()\n except Exception as e:\n print(\"Error occurred in starting a listener server for the node: {}. 
Error = {}\".format(self.node, e))\n\n    def initialize_empty_finger_table(self):\n        \"\"\"\n        Each node maintains a finger table with (at most) M entries.\n        The finger table is used to speed up the routing of query requests.\n        For example, if the node identifier is 1 and M = 4, then node 1 puts 2,3,5,9 in its finger table:\n        Finger Table for node 1:\n        entry.start = 2 \t entry.stop = 3 \t entry.next_start = 3\n        entry.start = 3 \t entry.stop = 5 \t entry.next_start = 5\n        entry.start = 5 \t entry.stop = 9 \t entry.next_start = 9\n        entry.start = 9 \t entry.stop = 1 \t entry.next_start = 1\n\n        Finger Table for node 5:\n        entry.start = 6 \t entry.stop = 7 \t entry.next_start = 7\n        entry.start = 7 \t entry.stop = 9 \t entry.next_start = 9\n        entry.start = 9 \t entry.stop = 13 \t entry.next_start = 13\n        entry.start = 13 \t entry.stop = 5 \t entry.next_start = 5\n\n        Finger Table for node 13:\n        entry.start = 14 \t entry.stop = 15 \t entry.next_start = 15\n        entry.start = 15 \t entry.stop = 1 \t entry.next_start = 1\n        entry.start = 1 \t entry.stop = 5 \t entry.next_start = 5\n        entry.start = 5 \t entry.stop = 13 \t entry.next_start = 13\n\n\n        A finger table entry includes both the Chord identifier and the\n        address (port number) of the relevant node.\n        For each node, this program builds the string of the node's endpoint\n        name (including local host IP and port number) and then uses SHA-1 to\n        hash it (similar to what is suggested in the Stoica, et al. paper).\n        :return: an empty finger table\n        \"\"\"\n        while self.node_socket is None or self.port_number == 0:\n            continue # busy-wait until the listener thread has bound a port\n        endpoint_string = '127.0.0.0 ' + str(self.port_number) # note the trailing space: must match the endpoint format hashed in __init__, or peers would compute a different id for this node\n        self.identifier = sha1_hash(endpoint_string)\n        self.node = self.identifier % (2 ** M)\n        finger_table = [None] + [FingerEntry(self.node, k) for k in range(1, M + 1)] # indexing starts at 1\n        return finger_table\n\n    def join(self, node_p=None):\n        \"\"\"\n        This function joins this node to the Chord network.\n        It takes an existing node as input, but if the input is None, it indicates that this\n        is the first node in the Chord network and does not need to communicate with other\n        nodes to notify and update them about its joining.\n        When a node n joins the network, certain keys previously assigned to n’s successor\n        now become assigned to n. To perform this, a Chord node must perform three tasks when\n        a node n joins the network:\n            1. Initialize the predecessor and fingers of node n\n            2. Update the fingers and predecessors of existing nodes to reflect\n            the addition of n\n            3. Notify the higher layer software so that it can transfer state\n            (e.g. values) associated with keys that node is now responsible for.\n\n        :param node_p: an arbitrary node already in the network, from which this node\n            learns its predecessor and fingers by asking it to look them up.\n            In the Stoica, et al. 
paper it is said that \"We assume that the new node learns\n the identity of an existing Chord node by some external mechanism.\"\n In this program the port of other node in the Chord network is given as input.\n \"\"\"\n if node_p is not None:\n self.init_finger_table(node_p)\n self.update_others() # Move keys in (predecessor, node] from successor\n\n else: # this is the only (first) node joining in the network\n for i in range(1, M + 1):\n self.finger_table[i].successor = {'number': self.node, 'port': self.port_number}\n self.predecessor = {'number': self.node, 'port': self.port_number}\n print('Node {} just joined the Chord network and listening on port {}'.format(self.node, self.port_number))\n print(\"--------------Finger table node {} after join:--------------\".format(self.node))\n self.print_finger_table()\n self.print_node_info()\n\n def call_rpc(self, other_node, method, arg1=None, arg2=None):\n \"\"\"\n This function calls other nodes in the network. It performs the rpc and receives the response.\n\n :param other_node: the address of the other nodes\n :param method: the remote function that should be called via rpc\n :param arg1: the first argument for the remote function\n :param arg2: the second argument for the remote function\n :return: the remote function's return value(s) or output\n \"\"\"\n if other_node == self.port_number:\n result = self.dispatch_rpc(method, arg1, arg2)\n return result\n client = ('localhost', other_node)\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as requester:\n requester.settimeout(1500)\n requester.connect(client)\n requester.sendto(pickle.dumps((method, arg1, arg2)), client)\n # print('calling receive & waiting to get result from {} for method {}'.format(other_node, method))\n response = requester.recv(BUF_SZ)\n return pickle.loads(response)\n except Exception as e:\n print(\"I'm node {}, Exception occurred in call function via rpc: {}\".format(self.node, e))\n exit()\n\n def handle_rpc(self, client):\n \"\"\"\n This function handles rpc requests from other nodes in the network and also clients and responds to them.\n\n :param client: is the network node or client that sent the rpc request.\n \"\"\"\n try:\n rpc = client.recv(BUF_SZ)\n method, arg1, arg2 = pickle.loads(rpc)\n result = self.dispatch_rpc(method, arg1, arg2)\n client.sendall(pickle.dumps(result))\n except Exception as e:\n print(\"I'm node {}, Exception occurred in call function via rpc: {}\".format(self.node, e))\n exit()\n\n def dispatch_rpc(self, method, arg1, arg2):\n if method == 'successor':\n return self.successor\n if method == 'update_finger_table':\n # arg1 = node details(identifier) and arg2 = index\n if arg1 is None or arg2 is None:\n print(\"Argument for calling `update_finger_table` method is not provided.\")\n exit()\n else:\n self.update_finger_table(arg1, arg2)\n elif method == 'find_predecessor':\n if arg1['number'] == self.node:\n return self.predecessor\n else:\n return self.find_predecessor(arg1)\n elif method == 'update_your_predecessor':\n if arg1 is None:\n print(\"Argument for calling `update_your_predecessor` method is not provided.\")\n exit()\n self.predecessor = arg1\n elif method == 'find_successor':\n if arg1 is None:\n print(\"Argument for calling `find_successor` method is not provided.\")\n exit()\n return self.find_successor(arg1)\n elif method == 'closest_preceding_finger':\n if arg1 is None:\n print(\"Argument for calling `closest_preceding_finger` method is not provided.\")\n exit()\n return 
self.closest_preceding_finger(arg1)\n        elif method == 'populate':\n            if arg1 is None:\n                print(\"Argument for calling `save_data` method is not provided.\")\n                return \"The row to populate is either missing or incorrect.\"\n            return self.save_data(arg1)\n        elif method == 'put_key':\n            return self.put_key(arg1, arg2)\n        elif method == 'query':\n            if arg1 is None or arg2 is None:\n                print(\"Arguments for calling `get_data` method are not provided.\")\n                return \"Pass the player id and year to find the row.\"\n            return self.query_data(arg1, arg2)\n        elif method == 'get_key':\n            return self.get_key(arg1)\n\n    @property\n    def successor(self):\n        return self.finger_table[1].successor\n\n    @successor.setter\n    def successor(self, id_dic):\n        self.finger_table[1].successor = id_dic\n\n    def find_successor(self, id):\n        \"\"\" Ask this node to find id's successor = successor(predecessor(id))\"\"\"\n        node_p = self.find_predecessor(id)\n        return self.call_rpc(node_p['port'], 'successor')\n\n    def find_predecessor(self, id):\n        node_p_number = self.node\n        node_p_successor = self.successor\n        node_p_port = self.port_number\n        while id not in ModRange(node_p_number + 1, node_p_successor['number'] + 1, NODES):\n            node_p = self.call_rpc(node_p_port, 'closest_preceding_finger', id) # np = np.closest_preceding_finger(id)\n\n            node_p_number = node_p['number']\n            node_p_port = node_p['port']\n            node_p_successor = self.call_rpc(node_p_port, 'successor') # ask the node we just found (not the previous one) for its successor\n\n        return {'number': node_p_number, 'port': node_p_port}\n\n    def closest_preceding_finger(self, id):\n        for i in range(M, 0, -1):\n            if self.finger_table[i].successor['number'] in ModRange(self.node + 1, id, NODES):\n                return self.finger_table[i].successor\n        return {'number': self.node, 'port': self.port_number}\n\n    def init_finger_table(self, node_p):\n        \"\"\"\n        This function updates the values of entries in the finger table of the local node.\n        It also updates the predecessor.\n\n        :param node_p: is an arbitrary node already in the network\n        \"\"\"\n        self.finger_table[1].successor = self.call_rpc(node_p['port'], 'find_successor', self.finger_table[\n            1].start) # node_p.find_successor(self.finger_table[1].start)\n        self.predecessor = self.call_rpc(self.successor['port'], 'find_predecessor',\n                                         self.successor) # self.predecessor = self.successor.predecessor\n        self.call_rpc(self.successor['port'], 'update_your_predecessor',\n                      {'number': self.node, 'port': self.port_number}) # self.successor.predecessor = self.node\n        for i in range(1, M):\n            if self.finger_table[i + 1].start in ModRange(self.node, self.finger_table[i].successor['number'], NODES):\n                # self.node <= self.finger_table[i + 1].start < self.finger_table[i].successor['number']:\n                self.finger_table[i + 1].successor = self.finger_table[i].successor\n            else:\n                self.finger_table[i + 1].successor = self.call_rpc(node_p['port'], 'find_successor', self.finger_table[\n                    i + 1].start) # node_p.find_successor(self.finger_table[i + 1].start)\n\n    def update_finger_table(self, s, i):\n        \"\"\"\n        This function updates this node's finger table with s, if s is the i-th finger of it\n        :param s: new node for entry i\n        :param i: the index of finger table\n        \"\"\"\n        if self.finger_table[i].start != self.finger_table[i].successor['number'] \\\n                and s['number'] in \\\n                ModRange(self.finger_table[i].start, self.finger_table[i].successor['number'], NODES):\n            self.finger_table[i].successor = s\n            p = self.predecessor # get first node preceding this local node\n            self.call_rpc(p['port'], 'update_finger_table', s, i)\n            print(\"--------------Finger table node {} after 
update:--------------\".format(self.node))\n self.print_finger_table()\n self.print_node_info()\n\n def update_others(self):\n \"\"\"\n This function updates all nodes whose finger tables should refer to this local node\n \"\"\"\n for i in range(1, M + 1):\n # find the last node p whose i-th finger might be this local node\n node_p = self.find_predecessor((1 + self.node - 2 ** (i - 1) + NODES) % NODES)\n # node_p.update_finger_table(self.node, i)\n self.call_rpc(node_p['port'], 'update_finger_table', {'number': self.node, 'port': self.port_number}, i)\n\n def put_key(self, key_id, key_value):\n \"\"\"\n This function updates the keys dictionary and adds or stores the new item given by the populate request.\n\n :return: True, indicates the pair successfully added\n \"\"\"\n self.keys[key_id] = key_value\n self.print_keys_dictionary()\n self.print_node_info()\n return True\n\n def get_key(self, key_id):\n \"\"\"\n This function replies to the query question by returning the data row in the\n keys dictionary for 'key_id,' or None if the keys dictionary does not contain\n a pair with key equal to the input id.\n \"\"\"\n if key_id not in self.keys.keys():\n print(\"This id is not available in node {} keys dictionary\".format(self.node))\n return None\n return self.keys[key_id]\n\n def save_data(self, input):\n \"\"\"\n This function save the input row in the appropriate node of the Chord network.\n As identifier or key of this row, the node uses the value in the first column (player id) concatenated\n with the value in the fourth column (year).\n :param input: data row in list format\n :return: a string describing the result of populating the input data row\n \"\"\"\n row = input\n player_id = row[0]\n year = row[3]\n data_id = sha1_hash(str(player_id) + str(year))\n bucket_id = data_id % 2 ** M\n responsible = self.call_rpc(self.port_number, 'find_successor', bucket_id)\n done = self.call_rpc(responsible['port'], 'put_key', data_id, row)\n if done:\n return \"Node {} saved the row\".format(responsible['number'])\n else:\n return \"Populate failed\"\n\n def query_data(self, player_id, year):\n \"\"\"\n This function finds the node in charge of keeping a row with the given\n player id and year and retrieves the row from that node.\n\n :param player_id: first part of the identifier for a row\n :param year: second part of the identifier for a row\n :return: a row or a string describing the result of query result\n \"\"\"\n data_id = sha1_hash(str(player_id) + str(year))\n bucket_id = data_id % 2 ** M\n responsible = self.call_rpc(self.port_number, 'find_successor', bucket_id)\n try:\n row = self.call_rpc(responsible['port'], 'get_key', data_id)\n if row is None:\n return \"Data is not available.\"\n if isinstance(row, list):\n return row\n else:\n return \"Query failed!\"\n except Exception as e:\n print(\"Query failed! 
Error = {}\".format(e))\n return \"Query failed!\"\n\n def print_finger_table(self):\n for entry in self.finger_table:\n if entry is None:\n continue\n print(\"entry.start = {} \\t entry.stop = {} \\t entry.successor = {} \\t \"\n .format(entry.start, entry.interval.stop,\n entry.successor))\n\n def print_node_info(self):\n print('[{}] Node {} is listening on port {}'.format(datetime.now().strftime(\"%I:%M:%S.%f\"), self.node,\n self.port_number))\n print('\\tsuccessor = {}\\t\\tpredecessor = {}'.format(self.successor['number'], self.predecessor['number']))\n\n def print_keys_dictionary(self):\n print(\"-------------- Keys in node {} --------------\".format(self.node))\n for key, value in self.keys.items():\n player_id = value[0]\n year = value[3]\n data_id = sha1_hash(str(player_id) + str(year))\n bucket_id = data_id % 2 ** M\n print('\\t\\t', bucket_id, ' : ', value[0])\n\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n node_port = int(args[0])\n ChordNode(node_port)\n","repo_name":"lmirzaei/distributed-systems-projects","sub_path":"Lab4_DHT/chord_node.py","file_name":"chord_node.py","file_ext":"py","file_size_in_byte":22869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
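A minimal, self-contained sketch of the finger-table geometry described in the record above (the constants mirror the record; the helper names are illustrative, not from the original file):

import hashlib

M = 7                 # id space of 2**M = 128 slots, as in the record
NODES = 2 ** M

def node_id(endpoint: str) -> int:
    # SHA-1 the endpoint string and fold the digest into the id space
    return int(hashlib.sha1(endpoint.encode()).hexdigest(), 16) % NODES

def finger_starts(n: int):
    # entry k of node n starts at (n + 2**(k-1)) mod 2**M, for k = 1..M
    return [(n + 2 ** (k - 1)) % NODES for k in range(1, M + 1)]

print(finger_starts(1)[:4])  # [2, 3, 5, 9] -- matches the docstring example for node 1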
+{"seq_id":"27131557112","text":"#!/usr/bin/env python3\n\n# Python's runtime error reporting mechanism:\n# the last line shows the error itself; everything above it is called the traceback\nimport sys\n# sys gives extra info about the active exception via sys.exc_info()\ndef main():\n    try:\n        x = int('foo')\n    except ValueError:\n        print(f'I caught a value error: {sys.exc_info()}')\n    except ZeroDivisionError:\n        print(\"don't divide by zero\")\n    except:\n        print('Unknown error')\n    # the error was caught above, so execution continues without a problem\n    print('Hello, World.')\n\nif __name__ == '__main__': main()","repo_name":"rudyredhat/PyEssTrainingLL","sub_path":"Ch07/07_01/exception-one.py","file_name":"exception-one.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
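The same exception details can be obtained without sys.exc_info() by binding the exception object; a small sketch of both styles:

import sys

try:
    int('foo')
except ValueError as err:      # bind the exception object directly...
    print(f'message: {err}')

try:
    1 / 0
except Exception:
    exc_type, exc_value, _ = sys.exc_info()  # ...or inspect it after the fact
    print(exc_type.__name__, exc_value)      # ZeroDivisionError division by zero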
+{"seq_id":"18669201864","text":"import sys\nimport xarray as xr\nimport dask\nimport dask.array as da\nimport netCDF4\nimport numpy as np\nfrom importlib import reload\nimport matplotlib as mpl\nfrom matplotlib import cm\nimport matplotlib.colors as colors\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib.colors import LinearSegmentedColormap\nimport pandas as pd\nimport regionmask\nimport cartopy.crs as ccrs\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# PATHS\nSM_PATH = '../XAI/data/'\nFIGS_PATH = './figs/'\n\n# IPCC Regions\nregions = ['NWN', 'NEN', 'WNA', 'CNA', 'ENA', 'NCA']\nregionsToRemove = ['SCA', 'CAR']\n\n# Models\nmodels = ['CNN10', 'CNN10_stand',\n 'CNNPan', 'CNN_UNET']\n\nnamesPlot = {'CNN10': 'DeepESD',\n 'CNN10_stand': 'DeepESD-Stand',\n 'CNNPan': 'CNN-PAN',\n 'CNN_UNET': 'CNN-UNET'}\n\n# Neurons\nidxNeuron = ['2000', '5950']\n\n# Vars to plot\nvarsToPlot = ['ta@1000', 'hus@1000']\n\n# Vars and heights dicts\nallVars = ['z@500', 'z@700', 'z@850', 'z@1000',\n 'hus@500', 'hus@700', 'hus@850', 'hus@1000',\n 'ta@500', 'ta@700', 'ta@850', 'ta@1000',\n 'ua@500', 'ua@700', 'ua@850', 'ua@1000',\n 'va@500', 'va@700', 'va@850', 'va@1000']\n\n# Initialize figure\nnRows = len(models)\n\nfig = plt.figure(figsize = (30, 20))\nouter = gridspec.GridSpec(nRows, 1,\n wspace = -0.9, hspace = -0.1)\n\nimportanceMin = 0.1\nimportanceMax = 0.7\ncolorSchema = 'magma_r'\ncmap = plt.get_cmap(colorSchema)\ncmap.set_under('white')\n\n# IPCC Regions\nar6 = regionmask.defined_regions.ar6.all\nnorthAmerica = ar6[regions]\n\n# Dict defining coords to print region mean values\ndictCoord = {'NWN': [0.4, 0.85],\n 'NEN': [0.8, 0.85],\n 'WNA': [0.45, 0.55],\n 'CNA': [0.65, 0.55],\n 'ENA': [0.8, 0.5],\n 'NCA': [0.57, 0.25],\n 'SCA': [0.69, 0.1],\n 'CAR': [0.89, 0.1]}\n\n# Iterate over subplots\ngeneralRow = 0\nfor model in models:\n\n # Initialize inner plot\n inner = gridspec.GridSpecFromSubplotSpec(1, len(varsToPlot) * len(idxNeuron),\n subplot_spec = outer[generalRow],\n wspace = 0.1, hspace = 0.1)\n\n # Plot Saliency Maps\n innerCol = 0\n\n for var in varsToPlot:\n for neuron in idxNeuron:\n\n # Load Saliency Map\n nameSM = 'SMtrainSet_' + model + '_neuron' + str(neuron) + '.npy'\n saliencyMaps = np.load(SM_PATH + nameSM)\n\n # Compute mean of saliency maps\n saliencyMaps = np.mean(saliencyMaps, axis=0)\n\n # Idx of variable\n combIdx = allVars.index(var)\n\n # Inner\n axes = plt.Subplot(fig, inner[0, innerCol])\n\n if innerCol == 0:\n axes.set_ylabel(namesPlot[model], fontsize = 16, weight = 'bold')\n\n # Compute subplot\n map = Basemap(ax = axes,\n llcrnrlon = -164.75, llcrnrlat = 11.75,\n urcrnrlon = -59.75, urcrnrlat = 69.75,\n resolution = 'c')\n\n im = map.imshow(saliencyMaps[:, :, combIdx],\n vmin = importanceMin, vmax = importanceMax,\n cmap = cmap)\n\n map.drawcoastlines(linewidth = 0.2, color = 'gray')\n\n fig.add_subplot(axes)\n\n innerCol = innerCol + 1\n\n # Saliency maps colorbar\n cbar_ax = fig.add_axes([0.36, 0.08, 0.3, 0.012])\n cb = fig.colorbar(im, cax = cbar_ax, orientation = 'horizontal',\n extend = 'min')\n cb.ax.xaxis.set_ticks_position('top')\n cb.ax.tick_params(labelsize = 24)\n cb.set_label(label = 'Relevance (unitless)', fontsize = 26)\n\n generalRow = generalRow + 1\n\n# Cols title\nfig.text(0.255-0.055, 0.9, 'Air temperature (1000 hPa)',\n fontsize = 36)\nfig.text(0.175, 0.86, 'North Point',\n fontsize = 28)\nfig.text(0.375, 
0.86, 'South Point',\n fontsize = 28)\n\nfig.text(0.65-0.065, 0.9, 'Specific Humidity (1000 hPa)',\n fontsize = 36)\nfig.text(0.57, 0.86, 'North Point',\n fontsize = 28)\nfig.text(0.77, 0.86, 'South Point',\n fontsize = 28)\n\nplt.savefig(FIGS_PATH + 'figSM_train.pdf',\n dpi = 300, bbox_inches = 'tight')","repo_name":"jgonzalezab/XAI-Statistical-Downscaling","sub_path":"figures/figSMs.py","file_name":"figSMs.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
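The script above averages a stack of saliency maps and lets cmap.set_under() blank out values below vmin; a toy, self-contained version of that masking (random data stands in for the real .npy files):

import numpy as np
import matplotlib.pyplot as plt

maps = np.random.rand(100, 20, 30)     # stand-in for np.load(SM_PATH + nameSM)
mean_map = maps.mean(axis=0)           # average over samples, as in the script

cmap = plt.get_cmap('magma_r').copy()  # copy() avoids mutating the registered cmap
cmap.set_under('white')                # anything below vmin renders white
plt.imshow(mean_map, vmin=0.1, vmax=0.7, cmap=cmap)
plt.colorbar(extend='min')
plt.savefig('saliency_demo.png', bbox_inches='tight')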
+{"seq_id":"3835403813","text":"import tensorflow as tf\n\n\ndef pairwise_ranking_crossentropy_loss(y_true, y_pred):\n \"\"\"\n :@param y_true: [batch = target_betweenness | src_ids | tgt_ids]\n :@param y_pred: [batch = pred_betweenness]\n The original DrBC implementation uses 5*N src_id,tgt_id pairs from a graph of N nodes\n \"\"\"\n pred_betweenness = y_pred\n target_betweenness = tf.slice(y_true, begin=(0, 0), size=(-1, 1))\n src_ids = tf.cast(tf.reshape(tf.slice(y_true, begin=(0, 1), size=(-1, 5)), (-1,)), 'int32')\n tgt_ids = tf.cast(tf.reshape(tf.slice(y_true, begin=(0, 6), size=(-1, 5)), (-1,)), 'int32')\n\n labels = tf.nn.embedding_lookup(target_betweenness, src_ids) - tf.nn.embedding_lookup(target_betweenness, tgt_ids)\n preds = tf.nn.embedding_lookup(pred_betweenness, src_ids) - tf.nn.embedding_lookup(pred_betweenness, tgt_ids)\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=tf.sigmoid(labels))\n","repo_name":"MartinXPN/DrBC","sub_path":"drbc/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
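The loss expects y_true rows packed as [betweenness | 5 src ids | 5 tgt ids]; a toy call to check the shapes, assuming the function above is importable (random values, not real training data):

import numpy as np
import tensorflow as tf

batch = 4
target = np.random.rand(batch, 1).astype('float32')              # true betweenness
src = np.random.randint(0, batch, (batch, 5)).astype('float32')  # sampled source ids
tgt = np.random.randint(0, batch, (batch, 5)).astype('float32')  # sampled target ids

y_true = tf.constant(np.concatenate([target, src, tgt], axis=1))  # shape (4, 11)
y_pred = tf.random.uniform((batch, 1))
loss = pairwise_ranking_crossentropy_loss(y_true, y_pred)
print(loss.shape)  # (20, 1): one cross-entropy term per sampled (src, tgt) pair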
+{"seq_id":"8373775631","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom prewikka import registrar, usergroup\n\n\nclass CLIManager(object):\n def __init__(self):\n self._commands = {}\n\n def _register(self, command, category, method, permissions, help, **options):\n d = self._commands.setdefault(command, {})\n if category not in d:\n # Avoid replacing methods by the ones from children classes\n d[category] = (method, permissions, help, options)\n\n def register(self, command, category, method=None, permissions=[], help=None, **options):\n usergroup.ALL_PERMISSIONS.declare(permissions)\n\n if method:\n self._register(command, category, method, permissions, help, **options)\n else:\n return registrar.DelayedRegistrar.make_decorator(\"cli\", self._register, command, category, permissions=permissions, help=help, **options)\n\n def unregister(self, command=None, category=None):\n if command and category:\n self._commands[command].pop(category)\n elif command:\n self._commands.pop(command)\n else:\n self._commands = {}\n\n def get(self, command):\n return self._commands.get(command, {})\n\n\ncli = CLIManager()\nget = cli.get\nregister = cli.register\nunregister = cli.unregister\n","repo_name":"Prelude-SIEM/prewikka","sub_path":"prewikka/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
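A usage sketch for the manager above (the command name and callback are made up, and this assumes a prewikka environment where usergroup.ALL_PERMISSIONS is initialised):

def list_users():
    return ['alice', 'bob']

cli.register('user', 'list', method=list_users, help='list all users')
method, permissions, help_text, options = cli.get('user')['list']
print(method())  # -> ['alice', 'bob']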
+{"seq_id":"36355193284","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the closestNumbers function below.\ndef closestNumbers(arr):\n arr.sort()\n diff=[]\n for i in range(1, len(arr)):\n diff.append(arr[i]-arr[i-1])\n p= diff.index(min(diff))\n pl=[]\n pl.append(p)\n arr1=[]\n\n for j in range(len(diff)):\n if j!=p:\n if diff[j]==diff[p]:\n pl.append(j)\n for x in pl:\n arr1.append(arr[x])\n arr1.append(arr[x+1])\n return arr1\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n result = closestNumbers(arr)\n\n fptr.write(' '.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","repo_name":"shkhrkat/HackerRank-solutions","sub_path":"Closest_Numbers.py","file_name":"Closest_Numbers.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
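The same answer can be computed in one pass over adjacent sorted pairs; a compact alternative sketch (my naming, same output as the record's closestNumbers):

def closest_numbers(arr):
    arr = sorted(arr)
    best = min(b - a for a, b in zip(arr, arr[1:]))  # smallest adjacent gap
    out = []
    for a, b in zip(arr, arr[1:]):
        if b - a == best:                            # emit every pair that ties
            out += [a, b]
    return out

print(closest_numbers([5, 4, 3, 2]))  # [2, 3, 3, 4, 4, 5]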
+{"seq_id":"6888570049","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\nnp.random.seed(1)\nfig = plt.figure(figsize=(4, 4), facecolor='white')\nax = fig.add_axes([0,0,1,1], frameon=False)\n\n# Generate random data\ndata = np.random.uniform(0, 1, (64, 100))\nX = np.linspace(-1, 1, data.shape[-1])\nG = 1.5 * np.exp(-4 * X ** 2)\n\n# Generate line plots\nlines = []\nfor i in range(len(data)):\n # Small reduction of the X extents to get a cheap perspective effect\n xscale = 1 - i / 200.\n # Same for linewidth (thicker strokes on bottom)\n lw = 1. - i / 100.0\n line, = ax.plot(xscale * X, i + G * data[i], color=\"black\", lw=lw)\n lines.append(line)\n\n# Set y limit (to avoid cropping because of thickness)\nax.set_ylim(-2, 65)\nax.set_xticks([])\nax.set_yticks([])\n\ndef update(*args):\n # Shift all data to the right\n data[:, 1:] = data[:, :-1]\n\n # Fill-in new values\n data[:, 0] = np.random.uniform(0, 1, len(data))\n\n # Update data\n for i in range(len(data)):\n lines[i].set_ydata(i + G * data[i])\n\n # Return modified artists\n return lines\n\nanim = animation.FuncAnimation(fig, update, frames=100, interval=20)\nanim.save('unknown-pleasures.gif', writer='imagemagick', fps=60)\nplt.show()\n","repo_name":"rougier/unknown-pleasures","sub_path":"unknown-pleasures.py","file_name":"unknown-pleasures.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"72"}
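The animation's core trick is a rolling buffer: shift every row one column to the right, then write the newest sample into column 0. The same update isolated on a tiny array:

import numpy as np

data = np.zeros((3, 5))
for step in (1, 2, 3):
    data[:, 1:] = data[:, :-1]  # shift right, dropping the oldest column
    data[:, 0] = step           # newest sample enters on the left
print(data[0])                  # [3. 2. 1. 0. 0.]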
+{"seq_id":"72695735914","text":"from django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views import generic\nfrom django.db.utils import IntegrityError\nfrom marketing.models import Interaksi\nfrom marketing.forms import InteraksiForm\n\n\nclass InteraksiListView(generic.ListView):\n queryset = Interaksi.objects.all().order_by(\"-created_at\")\n template_name = \"marketing/interaksi_list.html\"\n\n\n@staff_member_required(login_url=\"accounts:login\")\ndef createInteraksi(request):\n if request.method == \"POST\":\n if request.POST[\"no_hp\"]:\n # format no hp to 62xxx\n request.POST[\"no_hp\"][0][1:].replace(\" \", \"\").replace(\"-\", \"\")\n request.POST._mutable = True\n if request.POST[\"no_hp\"].startswith(\"0\"):\n request.POST[\"no_hp\"] = \"62\" + request.POST[\"no_hp\"][1:].replace(\n \" \", \"\"\n ).replace(\"-\", \"\")\n elif request.POST[\"no_hp\"].startswith(\"+\"):\n request.POST[\"no_hp\"] = (\n request.POST[\"no_hp\"][1:].replace(\" \", \"\").replace(\"-\", \"\")\n )\n elif request.POST[\"no_hp\"].startswith(\"6\"):\n request.POST[\"no_hp\"] = (\n request.POST[\"no_hp\"].replace(\" \", \"\").replace(\"-\", \"\")\n )\n\n form = InteraksiForm(request.POST)\n if form.is_valid():\n interaksi = form.save(commit=False)\n interaksi.tim_marketing = request.user\n try:\n interaksi.save()\n except IntegrityError:\n messages.error(request, \"Data sudah ada pada hari ini\")\n return redirect(\"marketing:list_interaksi\")\n\n messages.success(request, \"Interaksi berhasil ditambahkan\")\n return redirect(\"marketing:list_interaksi\")\n else:\n form = InteraksiForm()\n\n return render(\n request,\n \"marketing/add_interaksi.html\",\n {\"form\": form, \"title\": \"Add Interaksi\"},\n )\n\n\n@staff_member_required(login_url=\"accounts:login\")\ndef editInteraksi(request, pk):\n interaksi = get_object_or_404(Interaksi, pk=pk)\n if request.method == \"POST\":\n form = InteraksiForm(request.POST, instance=interaksi)\n if form.is_valid():\n form.save()\n messages.success(request, \"Pendaftaran berhasil di edit!\")\n return redirect(\"marketing:list_interaksi\")\n else:\n form = InteraksiForm(instance=interaksi)\n\n context = {\"form\": form, \"title\": \"Update Interaksi\"}\n return render(request, \"marketing/add_interaksi.html\", context)\n\n\n@staff_member_required(login_url=\"accounts:login\")\ndef delete_interaksi(request, pk):\n interaksi = get_object_or_404(Interaksi, pk=pk)\n interaksi.delete()\n messages.success(request, \"Interaksi berhasil di hapus\")\n return redirect(\"marketing:list_interaksi\")\n\n\n@staff_member_required(login_url=\"accounts:login\")\ndef interaksi_bulanan(request):\n return render(request, \"marketing/interaksi_graph.html\")\n","repo_name":"ArRosid/idn-dashboard","sub_path":"marketing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
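The no_hp handling above repeats itself (and the result of its first replace() call is discarded); a hedged sketch of a single helper the view could call instead (name and exact behaviour are illustrative, not from the repo):

def normalize_msisdn(raw: str) -> str:
    # normalise an Indonesian phone number to the 62... form
    digits = raw.replace(' ', '').replace('-', '')
    if digits.startswith('+'):
        digits = digits[1:]
    if digits.startswith('0'):
        digits = '62' + digits[1:]
    return digits

assert normalize_msisdn('0812-3456 789') == '628123456789'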
+{"seq_id":"6972998387","text":"import nltk\nimport langid\nimport numpy as np\nfrom prepare import removeStop\nfrom prepare import removeNer\nfrom sentiClassify import sentiment\n\n# nlp_data, flatten_str, top5, sim_where and article are expected to be defined by the calling script\n\npath = r'data/stopwords.txt'\n\n# Run sentiment analysis on each paragraph of the article, then collect the scores\ncount = 1\nres_pos = []\nres_neg = []\nres_neu = []\nfor i in nlp_data:\n    print(count)\n    str1 = flatten_str(i)\n    tokens = nltk.word_tokenize(str1)\n    rm = removeStop(tokens, path)\n    rm_s = rm.remove_stoplist()\n\n    test = removeNer(rm_s)\n    input_sent = test.remove_ner()\n    res_sent = sentiment(input_sent)\n    res_pos.append(res_sent['pos'])\n    res_neg.append(res_sent['neg'])\n    res_neu.append(res_sent['neu'])\n    count += 1\n\n\n\n# --- Person and location extraction with nltk\nres_person = []\nres_location = []\nfor i in nlp_data:\n    str1 = flatten_str(i)\n    tokens = nltk.word_tokenize(str1)\n    rm = removeStop(tokens, path)\n    rm_s = rm.remove_stoplist()\n\n    aaa = removeNer(rm_s)\n    person = aaa.get_person()\n    location = aaa.get_loaction()\n    res_person.append(person)\n    res_location.append(location)\n\nfrom collections import Counter\n# keep the three most frequent values (duplicates are removed further below)\ndef most_list(res_person):\n    a = Counter(res_person)\n    temp = a.most_common(3)\n    person3 = [i[0] for i in temp]\n    return person3\n\nres_person1 = [most_list(i) for i in res_person]\nres_location1 = [most_list(i) for i in res_location]\n\nres_person1 = [\",\".join(list(set(i))) for i in res_person1]\nres_location1 = [\",\".join(list(set(i))) for i in res_location1]\n\n\n\n# --- Final step: clean up the scores and merge the data\n\ndef na_replace(list_np):\n    rd2 = [round(i*100,2) for i in list_np]\n    temp = np.array(rd2)\n    index_nan = np.isnan(temp)\n    index_inf = np.isinf(temp)\n    temp[index_nan] = float(0)\n    temp[index_inf] = float(0)\n    out_list = [i.item() for i in temp]\n    return out_list\n\nres_pos1 = na_replace(res_pos)\nres_neu1 = na_replace(res_neu)\nres_neg1 = na_replace(res_neg)\n\ntt = zip(res_pos1, res_neu1, res_neg1, top5, res_person1, res_location1, sim_where, article)\nto_sql = list(tt)\n\n\n\n","repo_name":"goal1234/nlp","sub_path":"lie/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
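most_list keeps the three most frequent entries of a list via Counter; a quick standalone check with toy names:

from collections import Counter

def most_list(items):
    return [value for value, _ in Counter(items).most_common(3)]

print(most_list(['Paris', 'Paris', 'Lyon', 'Nice', 'Lyon', 'Paris']))
# ['Paris', 'Lyon', 'Nice']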
+{"seq_id":"28712217152","text":"import os\nimport json\n\n\nclass Jit(object):\n def __init__(self, event, context, iot):\n self.event = event\n self.context = context\n self.iot = iot\n\n def _certificate_arn(self):\n aws_region = os.getenv('AWS_REGION')\n aws_account_id = self.event.get('awsAccountId')\n certificate_id = self.event.get('certificateId')\n\n return f'arn:aws:iot:{aws_region}:{aws_account_id}:cert/{certificate_id}'\n\n def _iot_policy(self):\n return json.dumps({\n 'Version': '2012-10-17',\n 'Statement': [\n {\n 'Effect': 'Allow',\n 'Action': [\n 'iot:Publish',\n 'iot:Receive',\n 'iot:Subscribe',\n 'iot:Connect'\n ],\n 'Resource': '*'\n }\n ]\n })\n\n def _iot_policy_name(self):\n return f'{self.event.get(\"certificateId\")}-policy'\n\n def _create_iot_policy(self):\n return self.iot.create_policy(policyName=self._iot_policy_name(),\n policyDocument=self._iot_policy())\n\n def _does_iot_policy_exist(self):\n policies = self.iot.list_policies().get('policies')\n\n for policy in policies:\n if policy.get('policyName') == self._iot_policy_name():\n return True\n return False\n\n def _update_iot_certificate(self):\n self.iot.update_certificate(certificateId=self.event.get('certificateId'),\n newStatus='ACTIVE')\n\n def _attach_iot_policy(self):\n self.iot.attach_policy(policyName=self._iot_policy_name(),\n target=self._certificate_arn())\n\n def main(self):\n try:\n if not self._does_iot_policy_exist():\n self._create_iot_policy()\n self._attach_iot_policy()\n self._update_iot_certificate()\n except Exception as e:\n print(e)\n","repo_name":"knakayama/aws-iot-playground","sub_path":"jit/src/handlers/jit/jit.py","file_name":"jit.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
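A sketch of how the handler above could be wired into an AWS Lambda entry point with a real boto3 IoT client (the event fields are sample values from a hypothetical JIT registration payload):

import boto3

def lambda_handler(event, context):
    # event is expected to carry awsAccountId and certificateId
    iot = boto3.client('iot')
    Jit(event, context, iot).main()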
+{"seq_id":"30201635280","text":"# -*-coding:utf-8-*-\nimport jieba.analyse\nimport numpy as np\nfrom flask import Flask, request, jsonify\nimport ast\nimport pymysql\nimport jieba\n\napp = Flask(__name__)\njieba.initialize() # initialize jieba manually\njieba.load_userdict(\"./vocab.txt\")\njieba.enable_parallel(4)\nwith open('../data_helper/stopwords.txt', 'r') as f:\n    stopwords = [word.replace('\\n', '') for word in f.readlines()]\n\n\ndef inspection_spec_relation_DB(inspection_name):\n    db = pymysql.connect(host=\"172.30.2.231\", user=\"zhangziang\",\n                         password=\"Dzjzza*2022\", database=\"hrs\", port=3306, autocommit=False)\n    sql = f\"select * from inspection_spec_relation where name = '{inspection_name}'\"\n    cursor = db.cursor()\n    row_count = cursor.execute(sql)\n\n    return cursor\n\n\ndef sim(keywords1, keywords2):\n    # Jaccard-style overlap, normalised by the shorter keyword list\n    intersection = len(list(set(keywords1).intersection(set(keywords2))))\n    sample_len = min(len(keywords1), len(keywords2))\n    # union = len(list(set(keywords_sample).union(set(keywords2_match))))\n    # guard against division by zero\n    # sim = float(intersection) / union if union != 0 else 0\n    sim = float(intersection) / sample_len if sample_len != 0 else 0\n    if sim > 0.6:\n        return True\n    else:\n        return False\n\n\ndef inspection_calculate(input_inspectionName, inspection_value, DB_data):\n    name = DB_data[1]\n    data_type = DB_data[2]\n    operator = DB_data[3]\n    min_value = float(DB_data[4]) if DB_data[4] is not None else None\n    max_value = float(DB_data[5]) if DB_data[5] is not None else None\n    unique_code = DB_data[6]\n\n    assert input_inspectionName == name\n    assert data_type == 'NUMBER'\n\n    if operator == \"EQUALS\":\n        if min_value is None:\n            return None\n        elif inspection_value == min_value:\n            return unique_code\n        else:\n            return None\n    elif operator == \"GREATER_EQUAL\":\n        if max_value is None:\n            return None\n        elif inspection_value >= max_value:\n            return unique_code\n        else:\n            return None\n    elif operator == \"LESS_EQUAL\":\n        if min_value is None:\n            return None\n        elif inspection_value <= min_value:\n            return unique_code\n        else:\n            return None\n    elif operator == \"LESS\":\n        if min_value is None:\n            return None\n        elif inspection_value < min_value:\n            return unique_code\n        else:\n            return None\n    elif operator == \"GREATER\":\n        if max_value is None:\n            return None\n        elif inspection_value > max_value:\n            return unique_code\n        else:\n            return None\n    elif operator == \"RANGE\":\n        if min_value is None or max_value is None:\n            return None\n        elif min_value < inspection_value < max_value:\n            return unique_code\n        else:\n            return None\n    else:\n        return None\n\n\n@app.route(\"/wm_semantic_sim\", methods=[\"POST\"])\ndef semantic_sim():\n    data = request.get_json()\n    result = {'unique_code': []}\n    for input_inspectionName in data:\n        try:\n            inspection_value = float(data[input_inspectionName])\n            for DB_data in inspection_spec_relation_DB(input_inspectionName).fetchall():\n                unique_code = inspection_calculate(input_inspectionName, inspection_value, DB_data)\n                if unique_code is not None:\n                    result['unique_code'].append(unique_code)\n                else:\n                    continue\n        except Exception:\n            input_inspection_value = str(data[input_inspectionName])\n            cut_input = [word for word in jieba.cut(input_inspection_value) if word not in stopwords]\n            keyword_input = jieba.analyse.extract_tags(\"|\".join(cut_input), topK=200, withWeight=False)\n            for DB_data in inspection_spec_relation_DB(input_inspectionName).fetchall():\n                cut_DB = [word for word in jieba.cut(DB_data[4]) if word not in stopwords]\n                keyword_DB = jieba.analyse.extract_tags(\"|\".join(cut_DB), topK=200, withWeight=False)\n                if sim(keyword_input, keyword_DB) is True:\n                    result['unique_code'].append(DB_data[6])\n                else:\n                    continue\n    return jsonify(result)\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', threaded=True, port=5003)\n","repo_name":"274349293/MedBrain","sub_path":"text_sim/wm_sim/wm_semantic_match_API.py","file_name":"wm_semantic_match_API.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
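A hypothetical client call against the Flask service above (the inspection names and values are made up; the service must be running on port 5003):

import requests

payload = {'fasting glucose': '6.3', 'urine protein': 'weakly positive'}  # name -> measured value
r = requests.post('http://127.0.0.1:5003/wm_semantic_sim', json=payload)
print(r.json())  # e.g. {'unique_code': [...]}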
+{"seq_id":"71208641832","text":"#--------------------------------------------------------\n# LibUnits.py - Defines the system of units for the OpenSees model\n# Code: II\n#--------------------------------------------------------\n# base units (SI units)\nm = 1\nkg = 1\ns = 1\n# other units\nN = kg*m/s**2\ncm = 0.01*m\nPa = N/m**2\nksi = 6894757.2932*Pa\nkgf = 9.8066*N\nMPa = 10**6*Pa\npsi = 6894.76*Pa\n# physical constants\ng = 9.80665*m/s**2\n\n# Material properties\nfc = 210 # kg/cm2\nE = 15100*fc**0.5*10**4*9.80665*Pa\nG = 0.5*E/(1+0.2)\n# Column section\na = 60*cm\nAc = a**2\nρlc = 2400*Ac*m**2\nIzc = a**4/12\nIyc = a**4/12\nJxxc = 2.25*(a/2)**4\n# Beam section\nb = 60*cm\nh = 30*cm\nA = b*h\nρl = 2400*A*m**2\nIz = b*h**3/12\nIy = b**3*h/12\nJxx = 0.229*max(b,h)*min(b,h)**3 # to be revised\n#\ndef GeoModel(dx, dy, h, nx, ny, nz):\n    from numpy import zeros\n    Lx, Ly, Lz = dx*nx, dy*ny, h*nz\n    NN = (nx+1)*(ny+1)*(nz+1)\n    Nodes = zeros((NN,5))\n    # Create the nodes and assign coordinates (last column: tributary mass factor)\n    c = 0\n    for i in range(nz+1):\n        for j in range(ny+1):\n            for k in range(nx+1):\n                if k == nx and j != ny and j!= 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.50]\n                elif k != nx and j == ny and k!= 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.50]\n                elif k == 0 and j != ny and j!= 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.50]\n                elif k != nx and j == 0 and k!= 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.50]\n                elif k == nx and j == ny:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.25]\n                elif k == 0 and j == 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.25]\n                elif k == nx and j == 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.25]\n                elif k == 0 and j == ny:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.25]\n                else:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,1.00]\n                c = c + 1\n    Nodes[:(nx+1)*(ny+1),4]=0\n    # print(Nodes)\n\n    NE = (nx*(ny+1)+ny*(nx+1)+(nx+1)*(ny+1))*nz\n    Elems = zeros((NE,4))\n    # Create the connectivity of the vertical elements (columns)\n    c = 0\n    for i in range(nz):\n        for j in range(ny+1):\n            for k in range(nx+1):\n                Elems[c] = [c,c,c+(nx+1)*(ny+1),1]\n                c = c + 1\n    # Create the connectivity of the horizontal elements along X (beams)\n    m = (nx+1)*(ny+1)\n    for i in range(nz):\n        for j in range(ny+1):\n            for k in range(nx):\n                Elems[c] = [c,m,m+1,2]\n                m = m + 1\n                c = c + 1\n            m = m + 1\n    # Create the connectivity of the horizontal elements along Y (beams)\n    n = 0 \n    for i in range(nz):\n        n = n + (nx+1)*(ny+1)\n        for j in range(nx+1):\n            for k in range(ny):\n                Elems[c] = [c,j+k*(nx+1)+n,j+nx+1+k*(nx+1)+n,2]\n                c = c + 1\n    # Create the diaphragm master nodes (one per storey)\n    Diap = zeros((nz,4))\n    for i in range(nz):\n        Diap[i] = [i+1000,Lx/2.0,Ly/2.0,h*(i+1)]\n    #\n    return Nodes, Elems, Diap\n\ndef espectro_E030(T,Z=0.45,U=1.5,S=1.0,Tp=0.4,Tl=2.5,R=1):\n    from numpy import zeros\n    n = len(T)\n    E030 = zeros(n)\n    for i in range(n):\n        if T[i]>=0 and T[i]<0.2*Tp:\n            E030[i]=2.5#1+7.5*T[i]/Tp\n        elif T[i]>=0.2*Tp and T[i]<Tp:\n            E030[i] = 2.5\n        elif T[i]>=Tp and T[i]<Tl:\n            E030[i] = 2.5*(Tp/T[i])\n        elif T[i]>=Tl:\n            E030[i] = 2.5*(Tp*Tl/T[i]**2)\n        else:\n            print(\"The period cannot be negative!\")\n    return E030*Z*U*S/R\n\ndef get_static_loads(coef,p,h,T):\n    from numpy import zeros\n    n = len(h)\n    V = coef*sum(p)\n    F = zeros(n)\n    #\n    if T > 0.0 and T <= 0.5:\n        k=1.0\n    elif T>0.5:\n        k = 0.75+0.5*T\n    else:\n        print('The period is negative!')\n    #\n    div = 0.\n    for i in range(n):\n        div = div + p[i]*h[i]**k\n    #\n    for i in range(n):\n        F[i] = p[i]*h[i]**k/div*V\n    return F,k\n\ndef getCombo(E030,MF,modo,NT,Tmodes):\n    import numpy as np\n    import pandas as pd\n    # Total masses in each direction\n    Mx = sum(sum(MF[0::3,0::3]))\n    My = sum(sum(MF[1::3,1::3]))\n    Mr = sum(sum(MF[2::3,2::3]))\n\n    
# Initialise the unit vectors and accumulators\n    Ux,Uy,Rz = np.zeros(NT),np.zeros(NT),np.zeros(NT)\n    Ux[0::3]=1\n    Uy[1::3]=1\n    Rz[2::3]=1\n    SUMx, SUMy, SUMr = 0., 0., 0.\n    Nmodes = len(modo) \n\n    # Effective (participating) modal masses\n    ni=0\n    np.set_printoptions(precision = 4)\n    df1 = pd.DataFrame(columns=['Modo','T(s)','SumUx','SumUy','SumRz'])\n    for j in range(1,Nmodes+1):\n        FPx=modo[j-1].T@MF@Ux\n        FPy=modo[j-1].T@MF@Uy\n        FPr=modo[j-1].T@MF@Rz\n        FPRx=FPx**2/Mx\n        FPRy=FPy**2/My\n        FPRr=FPr**2/Mr\n        SUMx = SUMx + FPRx\n        SUMy = SUMy + FPRy\n        SUMr = SUMr + FPRr\n        #\n        if min(SUMx,SUMy,SUMr)>0.90 and ni==0:\n            ni = j\n        df1 = df1.append({'Modo':j, 'T(s)':Tmodes[j-1],'SumUx':SUMx,\n                          'SumUy':SUMy,'SumRz':SUMr}, ignore_index=True)\n    print('Minimum number of modes to consider:',ni)\n\n    # Initialise accumulators for the combined responses\n    D_ABSx,D_RCSCx = np.zeros(NT),np.zeros(NT)\n    Δ_ABSx,Δ_RCSCx = np.zeros(NT),np.zeros(NT)\n    V_ABSx,V_RCSCx = np.zeros(NT),np.zeros(NT)\n    D_ABSy,D_RCSCy = np.zeros(NT),np.zeros(NT)\n    Δ_ABSy,Δ_RCSCy = np.zeros(NT),np.zeros(NT)\n    V_ABSy,V_RCSCy = np.zeros(NT),np.zeros(NT)\n\n    # Modal spectral superposition\n    for j in range(1,ni+1):#ni+1\n        FPx=modo[j-1].T@MF@Ux\n        FPy=modo[j-1].T@MF@Uy\n        FPr=modo[j-1].T@MF@Rz\n        #\n        Sa = E030[j-1]\n        Sd = Sa*9.80665/(2*np.pi/Tmodes[j-1])**2\n        #\n        respDX = Sd*FPx*modo[j-1]\n        respAX = Sa*FPx*MF@modo[j-1]\n        D_ABSx = D_ABSx + abs(respDX)\n        D_RCSCx = D_RCSCx + (respDX)**2\n        respDX[3:] = respDX[3:] - respDX[:-3]\n        Δ_ABSx = Δ_ABSx + abs(respDX)\n        Δ_RCSCx = Δ_RCSCx + (respDX)**2\n        V_ABSx = V_ABSx + abs(np.cumsum(respAX[::-1])[::-1])\n        V_RCSCx = V_RCSCx + (np.cumsum(respAX[::-1])[::-1])**2\n        #\n        respDY = Sd*FPy*modo[j-1]\n        respAY = Sa*FPy*MF@modo[j-1]\n        D_ABSy = D_ABSy + abs(respDY)\n        D_RCSCy = D_RCSCy + (respDY)**2\n        respDY[3:] = respDY[3:] - respDY[:-3]\n        Δ_ABSy = Δ_ABSy + abs(respDY)\n        Δ_RCSCy = Δ_RCSCy + (respDY)**2\n        V_ABSy = V_ABSy + abs(np.cumsum(respAY[::-1])[::-1])\n        V_RCSCy = V_RCSCy + (np.cumsum(respAY[::-1])[::-1])**2\n\n    # Combination: 25%*ABS + 75%*RCSC (SRSS)\n    D_RCSCx = D_RCSCx**0.5\n    Δ_RCSCx = Δ_RCSCx**0.5\n    V_RCSCx = V_RCSCx**0.5\n    DDx = 0.25*D_ABSx + 0.75*D_RCSCx\n    ΔDx = 0.25*Δ_ABSx + 0.75*Δ_RCSCx\n    VDx = 0.25*V_ABSx + 0.75*V_RCSCx\n    #\n    D_RCSCy = D_RCSCy**0.5\n    Δ_RCSCy = Δ_RCSCy**0.5\n    V_RCSCy = V_RCSCy**0.5\n    DDy = 0.25*D_ABSy + 0.75*D_RCSCy\n    ΔDy = 0.25*Δ_ABSy + 0.75*Δ_RCSCy\n    VDy = 0.25*V_ABSy + 0.75*V_RCSCy\n    \n    df2 = pd.DataFrame(columns=['Nivel','VDx(ton)','VDy(ton)','UDx(cm)','UDy(cm)'])\n    for i in range(int(NT/3)):\n        df2 = df2.append({'Nivel':i+1, 'VDx(ton)':VDx[0::3][i]/1000,\n                          'VDy(ton)':VDy[1::3][i]/1000,'UDx(cm)':DDx[0::3][i]*100,\n                          'UDy(cm)':DDy[1::3][i]*100}, ignore_index=True)\n\n    return DDx, ΔDx, VDx, DDy, ΔDy, VDy, df1.iloc[:ni,:], df2\n\ndef genReport(df1,df2,df3,df4,df5,texto1,texto2):\n    from PIL import Image\n    import glob\n    #\n    lista = glob.glob('./imagenes/Mod*.png')\n    #\n    for archivo in lista:\n        im = Image.open(archivo)\n        width, height = im.size\n        left, top = width/6, height/6\n        right, bottom = 9*width/10, 9*height/10\n        im1 = im.crop((left, top, right, bottom))\n        im1.save(archivo)\n    #\n    from docx import Document\n    from docx.shared import Inches\n    from docx.enum.text import WD_ALIGN_PARAGRAPH\n\n    document = Document()\n    title1 = document.add_heading('Informe del Análisis Sísmico', 0)\n    title1.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n    p = document.add_paragraph('Realizado por ')\n    p.add_run('JPI Ingeniería e Innovación SAC').bold = True\n    p.add_run(' para el curso ')\n    p.add_run('ASEP.').italic = True\n\n    document.add_paragraph('Edificio 
Analizado - vista 3D:')\n document.add_picture('./imagenes/Modelo_3D.png', width=Inches(5.0))\n document.add_paragraph('Edificación de Categoría Tipo C.')\n\n document.add_heading('Generalidades', level=1)\n document.add_paragraph('Metrado de Cargas', style='Intense Quote')\n\n document.add_paragraph('Para el metrado de cargas se consideró las siguientes cargas distribuidas:')\n document.add_paragraph('Carga Viva:\\t\\t\\t\\t250 kg/m2', style='List Bullet')\n document.add_paragraph('Carga de Losa:\\t\\t\\t300 kg/m2', style='List Bullet')\n document.add_paragraph('Carga de Acabados:\\t\\t100 kg/m2', style='List Bullet')\n document.add_paragraph('Carga de Tabiquería:\\t\\t150 kg/m2', style='List Bullet')\n\n document.add_picture('./imagenes/Modelo_numerico.png', width=Inches(5.0))\n f1 = document.add_paragraph('Figura 1: Modelo Numérico para el Análisis.')\n f1.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n document.add_heading('Análisis Modal', level=1)\n document.add_paragraph('Modos de Vibración', style='Intense Quote')\n #\n t1 = document.add_paragraph('\\nTabla 1: Factor de Participación de Masas.')\n t1.alignment = WD_ALIGN_PARAGRAPH.CENTER\n table1 = document.add_table(rows=df1.shape[0]+1, cols=df1.shape[1])\n table1.style = 'Light Grid Accent 1'\n for j in range(df1.shape[-1]):\n table1.cell(0,j).text = df1.columns[j]\n for i in range(df1.shape[0]):\n for j in range(df1.shape[-1]):\n table1.cell(i+1,j).text = str(df1.values[i,j].round(4))\n\n document.add_picture('./imagenes/Modo_1.png', width=Inches(5.0))\n f2 = document.add_paragraph('Figura 2: Primer modo de vibración.')\n f2.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n document.add_picture('./imagenes/Modo_2.png', width=Inches(5.0))\n f3 = document.add_paragraph('Figura 3: Segundo modo de vibración.')\n f3.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n document.add_picture('./imagenes/Modo_3.png', width=Inches(5.0))\n f4 = document.add_paragraph('Figura 4: Tercer modo de vibración.')\n f4.alignment = WD_ALIGN_PARAGRAPH.CENTER\n #\n document.add_heading('Análisis Sísmico', level=1)\n document.add_paragraph('Análisis Estático', style='Intense Quote')\n p1 = document.add_paragraph(texto1)\n p1.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY\n #\n t2 = document.add_paragraph('Tabla 2: Fuerzas y desplazamientos del análisis estático en X.')\n t2.alignment = WD_ALIGN_PARAGRAPH.CENTER\n table3 = document.add_table(rows=df3.shape[0]+1, cols=df3.shape[1])\n table3.style = 'Light Grid Accent 1'\n for j in range(df3.shape[-1]):\n table3.cell(0,j).text = df3.columns[j]\n for i in range(df3.shape[0]):\n for j in range(df3.shape[-1]):\n table3.cell(i+1,j).text = str(df3.values[i,j].round(4))\n t3 = document.add_paragraph('\\nTabla 3: Fuerzas y desplazamientos del análisis estático en Y.')\n t3.alignment = WD_ALIGN_PARAGRAPH.CENTER\n #\n table4 = document.add_table(rows=df4.shape[0]+1, cols=df4.shape[1])\n table4.style = 'Light Grid Accent 1'\n for j in range(df4.shape[-1]):\n table4.cell(0,j).text = df4.columns[j]\n for i in range(df4.shape[0]):\n for j in range(df4.shape[-1]):\n table4.cell(i+1,j).text = str(df4.values[i,j].round(4))\n #\n document.add_paragraph('Análisis Dinámico Modal Espectral', style='Intense Quote')\n #\n document.add_paragraph('En este análisis se consideraron los siguientes parámetros sísmicos:')\n document.add_paragraph('Factor de Zona:\\t\\t\\t\\tZ = 0.45', style='List Bullet')\n document.add_paragraph('Factor de Uso:\\t\\t\\t\\tU = 1.00', style='List Bullet')\n document.add_paragraph('F. 
de Amplificación del Suelo:\\t\\tS = 1.00', style='List Bullet')\n document.add_paragraph('Coef. de Reducción:\\t\\t\\tRo= 8.00', style='List Bullet')\n\n document.add_picture('./imagenes/Espectro_E030.png', width=Inches(5.4))\n f5 = document.add_paragraph('Figura 5: Espectro según la norma E030.')\n f5.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n t4 = document.add_paragraph('\\nTabla 4: Respuesta Dinámica sin escalar.')\n t4.alignment = WD_ALIGN_PARAGRAPH.CENTER\n table2 = document.add_table(rows=df2.shape[0]+1, cols=df2.shape[1])\n table2.style = 'Light Grid Accent 1'\n for j in range(df2.shape[-1]):\n table2.cell(0,j).text = df2.columns[j]\n for i in range(df2.shape[0]):\n for j in range(df2.shape[-1]):\n table2.cell(i+1,j).text = str(df2.values[i,j].round(4))\n #\n p2 = document.add_paragraph(texto2)\n p2.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY\n #\n t5 = document.add_paragraph('\\nTabla 5: Respuesta Dinámica Escalada.')\n t5.alignment = WD_ALIGN_PARAGRAPH.CENTER\n table5 = document.add_table(rows=df5.shape[0]+1, cols=df5.shape[1])\n table5.style = 'Light Grid Accent 1'\n for j in range(df5.shape[-1]):\n table5.cell(0,j).text = df5.columns[j]\n for i in range(df5.shape[0]):\n for j in range(df5.shape[-1]):\n table5.cell(i+1,j).text = str(df5.values[i,j].round(4))\n #\n document.add_page_break()\n document.add_heading('Resultados', level=1)\n document.add_paragraph('Distorsiones de Entrepiso', style='Intense Quote')\n document.add_picture('./imagenes/distorsion_din.png', width=Inches(5.0))\n f6 = document.add_paragraph('Figura 6: Distorsión de entrepiso del análisis dinámico.')\n f6.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n file = 'Informe de Analisis Sismico.docx'\n document.save(file)\n import os\n os.startfile('%s'%file)","repo_name":"Julian-Palacios/ASEP","sub_path":"OSP_tools.py","file_name":"OSP_tools.py","file_ext":"py","file_size_in_byte":13805,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
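Assuming the module above is importable as OSP_tools, sampling the (reconstructed) E.030 spectrum with the report's parameters looks like this; the output is the dimensionless coefficient C*Z*U*S/R:

import numpy as np
from OSP_tools import espectro_E030

T = np.linspace(0.0, 3.0, 7)
Sa = espectro_E030(T, Z=0.45, U=1.0, S=1.0, Tp=0.4, Tl=2.5, R=8)
for t, s in zip(T, Sa):
    print(f'T = {t:4.2f} s  ->  C*Z*U*S/R = {s:6.4f}')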
+{"seq_id":"28731055222","text":"from jdbc.Connect import get_connection\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom jdbc.Convert_strTo_time_then_str import Convert_strTo_time_then_str\n\n\n# Inputs: checkpoint ID to query ('HK-107'); start and end of the query period as strings ('2019-10-08 16:00:00'); lane numbers in the form [('1','2',...)]\ndef Query_ls(conn, SSID, start_time, end_time, cdbh):\n    if conn == None: conn = get_connection() # connect to the database when conn is None\n    cr = conn.cursor() # create a query cursor\n    # query JGSJ (passage time) from the checkpoint transaction table for the given checkpoint ID, period and lane group\n    query_sql = (\n        \"SELECT JGSJ FROM SJCJ_T_CLXX_LS WHERE SSID = '%s' AND CDBH IN %s AND TO_CHAR(JGSJ,'YYYY-MM-DD HH24:MI:SS') BETWEEN '%s' AND '%s'\") % (\n        SSID, cdbh[0], start_time, end_time)\n    cr.execute(query_sql) # run the query\n    query_res = cr.fetchall() # fetch the results, in the form [(row1,), (row2,), (row3,), ...]\n    query_res = [i[0] for i in query_res] # flatten the result into [value1, value2, ...]\n    series_res = pd.Series(data=query_res, dtype='datetime64[ns]') # convert the JGSJ column to a Series with dtype datetime64\n    result = pd.Series(np.ones(len(query_res)), index=series_res) # index the Series by passage time, value 1 per record (one index entry = one vehicle)\n    ls_query_result = result.sort_index(ascending=True) # sort by index so the output is in ascending time order\n    return ls_query_result # return the transaction-log query result\n\n\n# compute the least common multiple of x and y\ndef lcm(x, y):\n    # take the larger of the two numbers\n    if x > y:\n        greater = x\n        smaller = y\n    else:\n        greater = y\n        smaller = x\n    if smaller == 0: return 0 # return 0 when either x or y is 0\n    else:\n\n        while (True):\n            if ((greater % x == 0) and (greater % y == 0)):\n                lcm = greater\n                break\n            greater += 1\n\n    return lcm # return the least common multiple\n\n\n# Take a datetime and return it with the seconds zeroed out (i.e. rounded down to the whole minute)\ndef Round_datetime(date_time):\n    tem = str(date_time)[:-2]+'00'\n    tem = pd.to_datetime(tem, format='%Y-%m-%d %H:%M:%S')\n    return tem # returned in YYYY-MM-DD HH24:MI:SS form\n\n\n# Flow statistics with a sliding time window: takes the transaction-log query result plus an arbitrary statistics period and window step, and returns the flow counts\ndef Flow_statistical(ls_query_result, timedelta, step_length):\n    if timedelta >= step_length: # the statistics period must be at least the window step\n        if step_length != 0: # when the step is non-zero\n            low_cm = lcm(timedelta, step_length) # least common multiple of step and period\n            loop_num = int(low_cm / step_length) # number of resampling passes\n        else: loop_num = 1 # step 0: a single pass (i.e. no sliding)\n        str_timedelta = str(timedelta) + 'T' # 'T' means minute frequency; see pd.resample() for other frequencies\n        resample_list = [] # holds the result of each resampling pass\n        for i in range(loop_num):\n            sample_step_length = step_length * i # window offset for pass i\n            # base shifts the origin of the resampling bins (an int, in the same unit as str_timedelta)\n            # label='right' stamps each bin with its right edge\n            resample_list.append(ls_query_result.resample(str_timedelta, base=sample_step_length, label='right').sum())\n        result_tem = pd.concat(resample_list) # concatenate the passes (stacked vertically along axis=0)\n        result_tem = result_tem.sort_index(ascending=True) # sort by index so the output is in ascending time order\n\n        # Trim the result so the windows match a true sliding-window count (drop windows with incomplete samples)\n        # timestamps mark the right edge of each closed window, so the first valid stamp is: the earliest index value rounded down, plus one period\n        start_period = Round_datetime(ls_query_result.index.min()) + datetime.timedelta(minutes=timedelta)\n        # ...and the last valid stamp is the latest index value rounded up to the next whole minute\n        end_period = Round_datetime(ls_query_result.index.max()) + datetime.timedelta(minutes=1)\n        result = result_tem[(result_tem.index >= start_period) & (result_tem.index <= end_period)] # slice by index\n        return result # return the flow statistics\n    else:\n        print(\"Error: the sliding-window step is larger than the statistics period\") # report the error; nothing is computed\n\n\nif __name__ == '__main__':\n    starttime = datetime.datetime.now() # program start time\n\n    conn = None\n    query_result = Query_ls(conn, 'HK-107', '2019-10-08 16:00:00', '2019-10-08 17:00:00',[('1','2')])\n    # print(result)\n    # query_result.to_csv('query_ls.csv')\n    # print(result.resample('5T', label='right').sum())\n    flow_result = Flow_statistical(query_result, 5, 2) # second argument: statistics period (minutes); third argument: sliding-window step\n    print(flow_result)\n\n    endtime = datetime.datetime.now()\n    print(\"the program runs : %d s\" % (endtime - starttime).seconds)\n","repo_name":"cf28920782519/XC","sub_path":"TrafficFlow.py","file_name":"TrafficFlow.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
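Flow_statistical can be exercised without the Oracle connection by feeding it a synthetic passage-time series (timestamps below are made up; this requires the older pandas the script targets, where resample(..., base=...) is still supported):

import numpy as np
import pandas as pd

stamps = pd.date_range('2019-10-08 16:00:03', periods=120, freq='30S')
passages = pd.Series(np.ones(len(stamps)), index=stamps)  # one vehicle per record

flows = Flow_statistical(passages, 5, 2)  # 5-minute windows sliding by 2 minutes
print(flows.head())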
+{"seq_id":"3690477468","text":"from products import *\r\nfrom query_handler import *\r\n\r\n\r\ndef adding_product(product: products, query: QueryHandler):\r\n \"\"\"\r\n function that adds or updates a product if it already exists in the database\r\n \"\"\"\r\n query.execute_non_fetch(\r\n \"INSERT INTO products (barcode,products_name,quantity) VALUES(%s,%s,%s) ON DUPLICATE KEY UPDATE barcode=VALUES(barcode), products_name=VALUES(products_name),quantity=VALUES(quantity)\",\r\n (product.barcode, product.product_name, product.quantity))\r\n print(\"insert done\")\r\n\r\n\r\ndef delete_product(barcode: str, query: QueryHandler):\r\n \"\"\"\r\n function that deletes a product in the database according to the given barcode\r\n :param barcode: products barcode (special code)\r\n \"\"\"\r\n products = query.execute_fetch(\"SELECT * FROM products WHERE barcode=%s\", (barcode,))\r\n if len(products) != 0:\r\n query.execute_non_fetch(\"DELETE FROM products WHERE barcode=%s\", (barcode,))\r\n print(\"delete done\")\r\n else:\r\n print(\"Barcode doesn't exist\")\r\n\r\n\r\ndef show_all_products(query: QueryHandler):\r\n \"\"\"\r\n fucntion that prints the products table in the database\r\n \"\"\"\r\n # print(query.execute_fetch(\"SELECT * FROM products\", ()))\r\n products = query.execute_fetch(\"SELECT * FROM products\", ())\r\n for item in products:\r\n for k,v in item.items():\r\n print(k,\":\",v)\r\n print()\r\n\r\ndef start_Code(q):\r\n \"\"\"\r\n Function that either insert/update/delete or print products according to users input\r\n if user clicks 1 it inserts or updates the databse\r\n if user clicks 2 it deletes a product from database\r\n if user clicks 3 it prints all the table in the database\r\n if user clicks 4 it exists the code\r\n \"\"\"\r\n while True:\r\n option = input(\"\"\"enter your choice: \r\n Number 1: To insert or update a product.\r\n Number 2: To delete a product.\r\n Number 3: To print all the products\r\n Number 4: To exit\r\n my choice: \r\n \"\"\")\r\n\r\n if option == \"1\":\r\n print(\"please enter product details,, \")\r\n product = products(input(\"product barcode: \"), input(\"product name: \"),\r\n int(input(\"product quantity: \")))\r\n adding_product(product, q)\r\n elif option == \"2\":\r\n print(\"please enter product barcode,, \")\r\n barcode = input(\"product barcode: \")\r\n delete_product(barcode, q)\r\n elif option == \"3\":\r\n show_all_products(q)\r\n elif option == \"4\":\r\n print(\"Thank you for using our program. Exiting...\")\r\n break\r\n else:\r\n print(\"please enter only one option for the list [1,2,3,4]\")\r\n","repo_name":"EbraFH/Python-SQL-Assessment","sub_path":"products_handle.py","file_name":"products_handle.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
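The query_handler module isn't shown in this record; a minimal sketch of the interface products_handle assumes (entirely hypothetical — the real module may differ):

import pymysql

class QueryHandler:
    def __init__(self, **conn_kwargs):
        # DictCursor so callers can iterate rows as dicts, as show_all_products does
        self.conn = pymysql.connect(cursorclass=pymysql.cursors.DictCursor, **conn_kwargs)

    def execute_fetch(self, sql, params):
        with self.conn.cursor() as cur:
            cur.execute(sql, params)
            return cur.fetchall()

    def execute_non_fetch(self, sql, params):
        with self.conn.cursor() as cur:
            cur.execute(sql, params)
        self.conn.commit()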
+{"seq_id":"19505851044","text":"from datetime import datetime\r\nimport gzip\r\nimport os\r\nimport random\r\nimport shutil\r\nimport requests\r\nimport numpy as np\r\nimport urllib.request\r\n\r\ndef single_movie(ID, API_KEY):\r\n URL = \"http://www.omdbapi.com/?i={}&apikey={}\".format(ID, API_KEY)\r\n r = requests.get(URL)\r\n json = r.json()\r\n\r\n title, link, year, rating, runtime, genre, poster, desc, director = [json['Title']], [json['Poster']], [json['Year']],\\\r\n [json['Rated']], [json['Runtime']], \\\r\n [json['Genre']], [json['Poster']], [json['Plot']], [json['Director']]\r\n return title, link, year, rating, runtime, genre, poster, desc, director\r\n\r\n\r\ndef genries(query, API_KEY):\r\n \r\n URL = \"http://www.omdbapi.com/?s={}&apikey={}\".format(query, API_KEY)\r\n\r\n r = requests.get(URL)\r\n json = r.json()\r\n \r\n genre_dict = {}\r\n\r\n if json['Response'] == 'True' or json['Response'] == True:\r\n search_result = json['Search']\r\n for movie in search_result:\r\n \r\n _, _, _, _, _, genre_list, _, _, _ = single_movie(movie['imdbID'],API_KEY)\r\n\r\n for genre in genre_list[0].split(\",\"):\r\n if genre in genre_dict.keys():\r\n genre_dict[genre][0].append(movie['Poster'])\r\n genre_dict[genre][1].append(movie['Type'])\r\n genre_dict[genre][2].append(movie['imdbID'])\r\n genre_dict[genre][3].append(movie['Title'])\r\n else:\r\n genre_dict[genre] = [[],[],[],[]]\r\n gen = []\r\n for k,v in genre_dict.items():\r\n gen.append([k,np.array(v,dtype=object).T])\r\n return gen\r\n\r\ndef rand_search():\r\n searches = [\"love\", \"game\", \"biography\", \"science\", \"hate\", \"trouble\", \"care\", \"romance\", \"crime\", \"high school\"]\r\n rand = random.randint(0,9)\r\n return searches[rand]\r\n\r\ndef download():\r\n \r\n data_path = 'app/backend/data/'\r\n urllib.request.urlretrieve('https://datasets.imdbws.com/title.ratings.tsv.gz', data_path+'title.tsv.gz')\r\n\r\n with gzip.open(data_path+'title.tsv.gz', 'rb') as f:\r\n data = f.readlines()\r\n\r\n data = [f.decode(\"utf-8\").split('\\t')[0] for f in data]\r\n\r\n return data[::-1]\r\n\"\"\"\r\nif __name__ == \"__main__\":\r\n print(download())\"\"\"","repo_name":"anthony-chukwuemeka-nwachukwu/Movie-App","sub_path":"app/backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
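A usage sketch for the helpers above (needs a valid OMDb key in place of YOUR_API_KEY; tt0111161 is just a sample IMDb id):

title, link, year, rating, runtime, genre, poster, desc, director = \
    single_movie('tt0111161', 'YOUR_API_KEY')
print(title[0], year[0], genre[0])

for genre_name, movies in genries(rand_search(), 'YOUR_API_KEY'):
    print(genre_name, movies.shape)  # rows of (poster, type, imdbID, title)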
+{"seq_id":"39576390095","text":"from django.urls import path\nfrom . import views\n\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Survey API\",\n default_version=\"v1.3\",\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\napp_name = \"api_service\"\n\nurlpatterns = [\n path(\"\", schema_view.with_ui(\"swagger\", cache_timeout=0), name=\"schema-swagger-ui\"),\n path(\"questions/\", views.QuestionsList.as_view()),\n path(\"questions//\", views.QuestionDetail.as_view()),\n path(\"questions//options/\", views.OptionsList.as_view()),\n path(\n \"questions//options//\",\n views.OptionDetail.as_view(),\n ),\n path(\"users/\", views.UserList.as_view()),\n path(\"users//\", views.UserDetail.as_view()),\n]\n","repo_name":"renmarin/survey","sub_path":"api_service/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"19440801289","text":"import pickle, json\n\nprint('Задание 1. Реализуйте класс «Автомобиль».')\n\n\nclass ItemsCollection:\n @staticmethod\n def save_pikle(item, filename):\n with open(filename, 'wb') as f:\n pickle.dump(item, f)\n\n @staticmethod\n def load_pikle(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\n @staticmethod\n def save_JSON(item, filename):\n with open(filename, 'w') as f:\n if isinstance(item, list) and all([isinstance(elm, ItemsCollection) for elm in item]):\n json_obj = []\n for elm in item:\n json_obj.append(f'{elm.__class__.__name__}(**{elm.__dict__})')\n json.dump(json_obj, f)\n elif isinstance(item, ItemsCollection):\n json.dump(f'{item.__class__.__name__}(**{item.__dict__})', f)\n\n @staticmethod\n def load_JSON(filename):\n with open(filename, 'r') as f:\n json_obj = json.load(f)\n if isinstance(json_obj, list):\n item = [eval(elm) for elm in json_obj]\n else:\n item = eval(json_obj)\n return item\n\n\nclass Car(ItemsCollection):\n VALID_KEYS = (\"model\", \"engine_volume\", \"automaker\", \"car_color\", \"year\")\n\n def __init__(self, model, year=None, car_color=None, automaker=None, engine_volume=None):\n self.model = model\n self.year = year\n self.car_color = car_color\n self.automaker = automaker\n self.engine_volume = engine_volume\n\n def set_info(self):\n self.model = input(\"Введите модель: \")\n self.year = input(\"Введите год выпуска: \")\n self.car_color = input(\"Введите цвет кузова: \")\n self.automaker = input(\"Введите производителя: \")\n self.engine_volume = input(\"Введите объем двигателя: \")\n\n def print_info(self):\n print(f'Автомобиль: {self.model}\\n\\t'\n f'объем двигателя: {self.engine_volume}\\n\\t'\n f'производитель: {self.automaker}\\n\\t'\n f'цвет кузова: {self.car_color}\\n\\t'\n f'год выпуска: {self.year};')\n\n def __setattr__(self, key, value):\n if key in self.VALID_KEYS and value != \"\":\n self.__dict__[key] = value\n elif key == \"model\":\n self.__dict__[key] = \"Undefined model\"\n elif key not in self.VALID_KEYS:\n raise AttributeError(\"Недопустимый атрибут\")\n else:\n raise ValueError(\"Значение не должно быть пустым\")\n\n def __repr__(self):\n return f\"{self.model}, {self.year}, {self.car_color}\"\n\n\na_car = Car(\"Mercedes SL350\", 2015, \"White\", \"MS\", 3500)\nb_car = Car(\"BMW 320D\", 2019, \"Black\", \"BMW\", 2000)\nc_car = Car(\"Toyota Camry\", 1998, \"Grey\", \"Toyota\", 2400)\na_car.save_pikle(a_car, \"a_car.pkl\")\nb_car.save_pikle(b_car, \"b_car.pkl\")\nItemsCollection.save_pikle(c_car, \"c_car.pkl\")\na2_car = ItemsCollection.load_pikle(\"a_car.pkl\")\nb2_car = Car.load_pikle(\"b_car.pkl\")\nc2_car = Car.load_pikle(\"c_car.pkl\")\nprint(a2_car, b2_car, c2_car, sep=\"\\n\")\na_car.save_JSON(a_car, \"a_car.json\")\ncars = [a_car, b_car, c_car]\nItemsCollection.save_pikle(cars, \"cars.pkl\")\npikle_cars = ItemsCollection.load_pikle(\"cars.pkl\")\nprint(\"pikle cars\")\nprint(*map(type, pikle_cars), pikle_cars)\nItemsCollection.save_JSON(cars, \"cars.json\")\ncars = ItemsCollection.load_JSON(\"cars.json\")\nprint(\"JSON cars\")\nprint(*map(type, cars), cars)\na3_car = ItemsCollection.load_JSON(\"a_car.json\")\nprint(a3_car, type(a3_car))\n\n# ==============================================================================\nprint('\\n\\nЗадание 2. 
Реализуйте класс «Книга».')\n\n\nclass Book(ItemsCollection):\n def __init__(self, name, author=None, year=None, publisher=None, genre=None):\n self.name = name\n self.author = author\n self.year = year\n self.publisher = publisher\n self.genre = genre\n\n def set_info(self):\n self.name = input(\"Введите название: \")\n self.author = input(\"Введите автора: \")\n self.year = input(\"Введите год издания: \")\n self.publisher = input(\"Введите издателя: \")\n self.genre = input(\"Введите жанр: \")\n\n def print_info(self):\n print(f'Книга: {self.name}\\n\\t'\n f'автор: {self.author}\\n\\t'\n f'год издания: {self.year}\\n\\t'\n f'издательство: {self.publisher}\\n\\t'\n f'жанр: {self.genre};')\n\n def __repr__(self):\n return f\"{self.name}, {self.author}, {self.year}, {self.genre}\"\n\n\na_book = Book('АРХИТЕКТУРА ЭВМ', 'Жмакин А.П.', 2010, genre=\"компьютерная литература\")\nb_book = Book('Мини-ЭВМ. Организация и программирование', 'Экхауз Р., Моррис Л.', 1983, genre=\"компьютерная литература\")\nc_book = Book('PDP-11. Архитектура и программирование', 'Фрэнк Томас', 1986, 'Радио и связь', \"компьютерная литература\")\na_book.save_pikle(a_book, \"a_book.pkl\")\na_2book = Book.load_pikle(\"a_book.pkl\")\nprint(a_2book, type(a_2book))\nprint(b_book)\nb_book.save_JSON(b_book, \"b_book.json\")\nb2_book = Book.load_JSON(\"b_book.json\")\nprint(b2_book, type(b2_book))\n\n# ==============================================================================\n\nprint('\\n\\nЗадание 3. Реализуйте класс «Стадион».')\n\n\nclass Stadium(ItemsCollection):\n def __init__(self, name, city=None, country=None, year=None, capacity=None):\n self.name = name\n self.city = city\n self.country = country\n self.year = year\n self.capacity = capacity\n\n def set_info(self):\n self.name = input(\"Введите название: \")\n self.city = input(\"Введите город: \")\n self.country = input(\"Введите страну: \")\n self.year = input(\"Введите год открытия: \")\n self.capacity = input(\"Введите вместимость: \")\n\n def print_info(self):\n print(f'Стадион: {self.name}\\n\\t'\n f'город: {self.city}\\n\\t'\n f'страна: {self.country}\\n\\t'\n f'год открытия: {self.year}\\n\\t'\n f'вместимость: {self.capacity};')\n\n\nstad_1 = Stadium(\"Arena\", \"Saint-Petersburg\", \"Russia\", 2010, 250000)\nstad_1.save_pikle(stad_1, \"stad1.pkl\")\nstad2 = ItemsCollection.load_pikle(\"stad1.pkl\")\nstad2.print_info()\nprint(type(stad2))\n","repo_name":"GolubinM/homeWork","sub_path":"28/HomeWork_28.py","file_name":"HomeWork_28.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"72315621993","text":"class One():\n def meth1 (self, a, b):\n print(\"Line 1 - User 1\")\n a.meth2()\n b.meth3()\n\nclass Two():\n def meth2(self):\n print(\"Line 2 - User 2\")\n\nclass Tree():\n def meth3(self):\n print(\"Line 3 - User 3\")\n\nobj1 = One()\nobj2 = Two()\nobj3 = Tree()\n\nobj1.meth1(obj2, obj3)\n\n_________________________________\nclass One():\n c = 10\n def meth (self, a, b):\n print(self.c + a + b)\n\nclass Two():\n def meth(self, a):\n self.meth = len(str(a))\n return self.meth\n\n\nobj1 = One()\nobj2 = Two()\n\nobj1.meth(45, 55)\nprint(obj2.meth(100))\n\n________________________________\nclass Base():\n def __init__ (self, variable):\n self.result = variable\n def out(self):\n self.result = self.result * 5\n print(self.result)\n\nclass SubClass(Base):\n def out(self):\n print(\"\\n---\")\n Base.out(self)\n print(\"---\")\n\nobj1 = Base(15)\nobj2 = SubClass(30)\n\nobj1.out()\nobj2.out()\n\n_____________________________________________\nclass Base:\n def __init__ (self, variable):\n self.result = variable\n def out(self):\n print(self.result)\n\nclass SubClass(Base):\n def multiple(self, z):\n self.result *= z\n print(\"---\")\n\nclass SubSubClass(SubClass):\n def devide(self, q):\n self.result /= q\n print(\"....\")\n def out(self):\n print(self.result*100)\n\nobj1 = Base(15)\nobj2 = SubClass(15)\nobj3 = SubSubClass(9)\n\nobj1.out()\nobj2.multiple(5)\nobj2.out()\nobj3.devide(3)\nobj3.multiple(2)\nobj3.out()\n","repo_name":"alisatsar/itstep","sub_path":"Python/Lessons/class/subClass.py","file_name":"subClass.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17634304890","text":"import os\nimport unittest\n\nimport ezplotly.settings as plotting_settings\nimport numpy as np\nfrom algo_ops.dependency.tester_util import clean_paths\nfrom algo_ops.ops.cv import ImageResult\nfrom algo_ops.ops.op import Op\nfrom ocr_ops.framework.op.result.ocr_result import OCRImageResult, OCRPipelineResult\nfrom ocr_ops.run_finding.interval import Interval\n\nfrom card_recognizer.classifier.core.card_prediction_result import (\n CardPredictionResult,\n CardPrediction,\n)\nfrom card_recognizer.classifier.core.word_classifier import WordClassifier\nfrom card_recognizer.reference.core.build import ReferenceBuild\n\n\nclass TestWordClassifier(unittest.TestCase):\n @staticmethod\n def _clean_env() -> None:\n clean_paths(\n dirs=(\"algo_ops_profile\",), files=(\"classify.txt\", \"classify_input.txt\")\n )\n\n def setUp(self) -> None:\n # suppress plotting\n plotting_settings.SUPPRESS_PLOTS = True\n\n # check that reference build has been set up\n self.master_model_pkl = ReferenceBuild.get_set_pkl_path(\n set_name=\"Brilliant Stars\"\n )\n self.assertTrue(os.path.exists(self.master_model_pkl))\n\n # setup input\n self.test_input = [\"Charizard\", \"fire\", \"burn\", \"fire\", \"spin\"]\n self._clean_env()\n\n def tearDown(self) -> None:\n self._clean_env()\n\n def test_end_to_end(self) -> None:\n \"\"\"\n Test end to end card prediction capability of WordClassifier.\n \"\"\"\n\n # init word classifier\n classifier = WordClassifier(ref_pkl_path=self.master_model_pkl)\n self.assertTrue(isinstance(classifier, Op))\n self.assertEqual(classifier.input, None)\n self.assertEqual(classifier.output, None)\n for method in (\n classifier.vis_profile,\n classifier.save_input,\n classifier.save_output,\n ):\n with self.assertRaises(ValueError):\n method()\n\n # test with test input List[str]\n output = classifier.exec(inp=[self.test_input])\n self.assertTrue(isinstance(classifier.input, list))\n self.assertTrue(isinstance(classifier.output, CardPredictionResult))\n self.assertEqual(classifier.input, [self.test_input])\n self.assertEqual(output, classifier.output)\n self.assertEqual(output.num_frames, 1)\n self.assertEqual(output.reference_set, \"Brilliant Stars\")\n self.assertEqual(output.unique_cards, [17])\n self.assertEqual(len(output), 1)\n self.assertTrue(isinstance(output[0], CardPrediction))\n self.assertEqual(output[0].card_index_in_reference, 17)\n self.assertEqual(output.input_path, None)\n\n # test vis and save input\n classifier.vis()\n classifier.vis_input()\n classifier.vis_profile()\n classifier.save_input()\n classifier.save_output()\n self.assertTrue(os.path.exists(\"classify.txt\"))\n self.assertTrue(os.path.exists(\"classify_input.txt\"))\n self.assertTrue(\n os.path.exists(os.path.join(\"algo_ops_profile\", \"classify.png\"))\n )\n\n # test input wrapped in OCRPipelineResult\n input_img = ImageResult(img=np.array([0.0]))\n ocr_image_results = [\n OCRImageResult.from_text_list(texts=self.test_input, input_img=input_img)\n ]\n ocr_pipeline_result = OCRPipelineResult(\n ocr_image_results=ocr_image_results, input_path=\"test.avi\"\n )\n output2 = classifier.exec(inp=ocr_pipeline_result)\n self.assertEqual(output2, classifier.output)\n self.assertEqual(output2.num_frames, 1)\n self.assertEqual(output2.reference_set, \"Brilliant Stars\")\n self.assertEqual(output2.unique_cards, [17])\n self.assertEqual(len(output2), 1)\n self.assertTrue(isinstance(output2[0], CardPrediction))\n self.assertEqual(output2[0].card_index_in_reference, 17)\n 
self.assertEqual(output2.input_path, \"test.avi\")\n\n def test_classify_multiple(self) -> None:\n \"\"\"\n Test classifying a run of length 2 of the same card.\n \"\"\"\n classifier = WordClassifier(ref_pkl_path=self.master_model_pkl)\n output = classifier.exec(inp=[self.test_input, self.test_input])\n self.assertTrue(isinstance(output, CardPredictionResult))\n self.assertEqual(len(output), 2)\n self.assertEqual(output.unique_cards, [17])\n self.assertEqual(len(output.runs), 1)\n self.assertEqual(output.runs[0].interval, Interval(start=0, end=2))\n self.assertEqual(output.runs[0].card_index, 17)\n self.assertEqual(output.input_path, None)\n","repo_name":"prateekt/pokemon-card-recognizer","sub_path":"card_recognizer/classifier/test/test_word_classifier.py","file_name":"test_word_classifier.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"14201851531","text":"def efficientprime(number):\n if number <= 3:\n return number > 1\n elif number % 2 == 0 or number % 3 == 0:\n return False\n i = 5\n while i ** 2 <= number:\n if number % i == 0 or number % (i + 2) == 0:\n return False\n i += 6\n return True\n\n\nprimes = [2]\n\nfound = False\ni = 9\nwhile not found:\n n = max(primes)\n while max(primes) < i:\n n += 1\n primes.append(n) if efficientprime(n) else None\n\n abides = False\n for prime in primes[:len(primes)-1][::-1]:\n if (((i - prime) / 2) ** 0.5).is_integer():\n abides = True\n break\n\n if not abides:\n result = i\n found = False\n break\n else:\n i += 2\n while efficientprime(i):\n i += 2\n\nprint(\"Result: %s\" % result)\n","repo_name":"Lordfirespeed/BunchaPythonStuff","sub_path":"Project Euler/#46 - Goldbach's Other Conjecture.py","file_name":"#46 - Goldbach's Other Conjecture.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"23644741534","text":"import datetime\nimport json\nimport logging\nimport os\nimport re\nimport tarfile\nimport tempfile\nfrom unittest.mock import Mock, patch\n\nimport pytest\nfrom botocore.exceptions import ClientError\n\nfrom braket.aws import AwsQuantumJob, AwsSession\nfrom braket.aws.queue_information import HybridJobQueueInfo\n\n\n@pytest.fixture\ndef aws_session(quantum_job_arn, job_region):\n _aws_session = Mock(spec=AwsSession)\n _aws_session.create_job.return_value = quantum_job_arn\n _aws_session.default_bucket.return_value = \"default-bucket-name\"\n _aws_session.get_default_jobs_role.return_value = \"default-role-arn\"\n _aws_session.construct_s3_uri.side_effect = (\n lambda bucket, *dirs: f\"s3://{bucket}/{'/'.join(dirs)}\"\n )\n\n def fake_copy_session(region):\n _aws_session.region = region\n return _aws_session\n\n _aws_session.copy_session.side_effect = fake_copy_session\n _aws_session.list_keys.return_value = [\"job-path/output/model.tar.gz\"]\n _aws_session.region = job_region\n\n _braket_client_mock = Mock(meta=Mock(region_name=job_region))\n _aws_session.braket_client = _braket_client_mock\n return _aws_session\n\n\n@pytest.fixture\ndef generate_get_job_response():\n def _get_job_response(**kwargs):\n response = {\n \"ResponseMetadata\": {\n \"RequestId\": \"d223b1a0-ee5c-4c75-afa7-3c29d5338b62\",\n \"HTTPStatusCode\": 200,\n },\n \"algorithmSpecification\": {\n \"scriptModeConfig\": {\n \"entryPoint\": \"my_file:start_here\",\n \"s3Uri\": \"s3://amazon-braket-jobs/job-path/my_file.py\",\n }\n },\n \"checkpointConfig\": {\n \"localPath\": \"/opt/omega/checkpoints\",\n \"s3Uri\": \"s3://amazon-braket-jobs/job-path/checkpoints\",\n },\n \"createdAt\": datetime.datetime(2021, 6, 28, 21, 4, 51),\n \"deviceConfig\": {\n \"device\": \"arn:aws:braket:::device/qpu/rigetti/Aspen-10\",\n },\n \"hyperParameters\": {\n \"foo\": \"bar\",\n },\n \"inputDataConfig\": [\n {\n \"channelName\": \"training_input\",\n \"dataSource\": {\n \"s3DataSource\": {\n \"s3Uri\": \"s3://amazon-braket-jobs/job-path/input\",\n }\n },\n }\n ],\n \"instanceConfig\": {\n \"instanceCount\": 1,\n \"instanceType\": \"ml.m5.large\",\n \"volumeSizeInGb\": 1,\n },\n \"jobArn\": \"arn:aws:braket:us-west-2:875981177017:job/job-test-20210628140446\",\n \"jobName\": \"job-test-20210628140446\",\n \"outputDataConfig\": {\"s3Path\": \"s3://amazon-braket-jobs/job-path/data\"},\n \"roleArn\": \"arn:aws:iam::875981177017:role/AmazonBraketJobRole\",\n \"status\": \"RUNNING\",\n \"stoppingCondition\": {\"maxRuntimeInSeconds\": 1200},\n }\n response.update(kwargs)\n\n return response\n\n return _get_job_response\n\n\n@pytest.fixture\ndef generate_cancel_job_response():\n def _cancel_job_response(**kwargs):\n response = {\n \"ResponseMetadata\": {\n \"RequestId\": \"857b0893-2073-4ad6-b828-744af8400dfe\",\n \"HTTPStatusCode\": 200,\n },\n \"cancellationStatus\": \"CANCELLING\",\n \"jobArn\": \"arn:aws:braket:us-west-2:875981177017:job/job-test-20210628140446\",\n }\n response.update(kwargs)\n return response\n\n return _cancel_job_response\n\n\n@pytest.fixture\ndef quantum_job_name():\n return \"job-test-20210628140446\"\n\n\n@pytest.fixture\ndef job_region():\n return \"us-west-2\"\n\n\n@pytest.fixture\ndef quantum_job_arn(quantum_job_name, job_region):\n return f\"arn:aws:braket:{job_region}:875981177017:job/{quantum_job_name}\"\n\n\n@pytest.fixture\ndef quantum_job(quantum_job_arn, aws_session):\n return AwsQuantumJob(quantum_job_arn, aws_session)\n\n\ndef test_equality(quantum_job_arn, aws_session, 
job_region):\n new_aws_session = Mock(region=job_region)\n quantum_job_1 = AwsQuantumJob(quantum_job_arn, aws_session)\n quantum_job_2 = AwsQuantumJob(quantum_job_arn, aws_session)\n quantum_job_3 = AwsQuantumJob(quantum_job_arn, new_aws_session)\n other_quantum_job = AwsQuantumJob(\n \"arn:aws:braket:us-west-2:875981177017:job/other-job\", aws_session\n )\n non_quantum_job = quantum_job_1.arn\n\n assert quantum_job_1 == quantum_job_2\n assert quantum_job_1 == quantum_job_3\n assert quantum_job_1 is not quantum_job_2\n assert quantum_job_1 is not quantum_job_3\n assert quantum_job_1 is quantum_job_1\n assert quantum_job_1 != other_quantum_job\n assert quantum_job_1 != non_quantum_job\n\n\ndef test_hash(quantum_job):\n assert hash(quantum_job) == hash(quantum_job.arn)\n\n\n@pytest.mark.parametrize(\n \"arn, expected_region\",\n [\n (\"arn:aws:braket:us-west-2:875981177017:job/job-name\", \"us-west-2\"),\n (\"arn:aws:braket:us-west-1:1234567890:job/job-name\", \"us-west-1\"),\n ],\n)\n@patch(\"braket.aws.aws_quantum_job.boto3.Session\")\n@patch(\"braket.aws.aws_quantum_job.AwsSession\")\ndef test_quantum_job_constructor_default_session(\n aws_session_mock, mock_session, arn, expected_region\n):\n mock_boto_session = Mock()\n aws_session_mock.return_value = Mock()\n mock_session.return_value = mock_boto_session\n job = AwsQuantumJob(arn)\n mock_session.assert_called_with(region_name=expected_region)\n aws_session_mock.assert_called_with(boto_session=mock_boto_session)\n assert job.arn == arn\n assert job._aws_session == aws_session_mock.return_value\n\n\ndef test_quantum_job_constructor_invalid_region(aws_session):\n region_mismatch = \"The aws session region does not match the region for the supplied arn.\"\n arn = \"arn:aws:braket:unknown-region:875981177017:job/quantum_job_name\"\n with pytest.raises(ValueError, match=region_mismatch):\n AwsQuantumJob(arn, aws_session)\n\n\n@patch(\"braket.aws.aws_quantum_job.boto3.Session\")\ndef test_quantum_job_constructor_explicit_session(mock_session, quantum_job_arn, job_region):\n aws_session_mock = Mock(region=job_region)\n job = AwsQuantumJob(quantum_job_arn, aws_session_mock)\n assert job._aws_session == aws_session_mock\n assert job.arn == quantum_job_arn\n mock_session.assert_not_called()\n\n\ndef test_metadata(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):\n get_job_response_running = generate_get_job_response(status=\"RUNNING\")\n aws_session.get_job.return_value = get_job_response_running\n assert quantum_job.metadata() == get_job_response_running\n aws_session.get_job.assert_called_with(quantum_job_arn)\n\n get_job_response_completed = generate_get_job_response(status=\"COMPLETED\")\n aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.metadata() == get_job_response_completed\n aws_session.get_job.assert_called_with(quantum_job_arn)\n assert aws_session.get_job.call_count == 2\n\n\ndef test_metadata_caching(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):\n get_job_response_running = generate_get_job_response(status=\"RUNNING\")\n aws_session.get_job.return_value = get_job_response_running\n assert quantum_job.metadata(True) == get_job_response_running\n\n get_job_response_completed = generate_get_job_response(status=\"COMPLETED\")\n aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.metadata(True) == get_job_response_running\n aws_session.get_job.assert_called_with(quantum_job_arn)\n assert aws_session.get_job.call_count == 
1\n\n\ndef test_queue_position(quantum_job, aws_session, generate_get_job_response):\n state_1 = \"COMPLETED\"\n queue_info = {\n \"queue\": \"JOBS_QUEUE\",\n \"position\": \"None\",\n \"message\": \"Job is in COMPLETED status. \"\n \"AmazonBraket does not show queue position for this status.\",\n }\n get_job_response_completed = generate_get_job_response(status=state_1, queueInfo=queue_info)\n aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.queue_position() == HybridJobQueueInfo(\n queue_position=None, message=queue_info[\"message\"]\n )\n\n state_2 = \"QUEUED\"\n queue_info = {\"queue\": \"JOBS_QUEUE\", \"position\": \"2\"}\n get_job_response_queued = generate_get_job_response(status=state_2, queueInfo=queue_info)\n aws_session.get_job.return_value = get_job_response_queued\n assert quantum_job.queue_position() == HybridJobQueueInfo(queue_position=\"2\", message=None)\n\n\ndef test_state(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):\n state_1 = \"RUNNING\"\n get_job_response_running = generate_get_job_response(status=state_1)\n aws_session.get_job.return_value = get_job_response_running\n assert quantum_job.state() == state_1\n aws_session.get_job.assert_called_with(quantum_job_arn)\n\n state_2 = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state_2)\n aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.state() == state_2\n aws_session.get_job.assert_called_with(quantum_job_arn)\n assert aws_session.get_job.call_count == 2\n\n\ndef test_state_caching(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):\n state_1 = \"RUNNING\"\n get_job_response_running = generate_get_job_response(status=state_1)\n aws_session.get_job.return_value = get_job_response_running\n assert quantum_job.state(True) == state_1\n\n state_2 = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state_2)\n aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.state(True) == state_1\n aws_session.get_job.assert_called_with(quantum_job_arn)\n assert aws_session.get_job.call_count == 1\n\n\n@pytest.fixture()\ndef result_setup(quantum_job_name):\n with tempfile.TemporaryDirectory() as temp_dir:\n os.chdir(temp_dir)\n file_path = \"results.json\"\n\n with open(file_path, \"w\") as write_file:\n write_file.write(\n json.dumps(\n {\n \"braketSchemaHeader\": {\n \"name\": \"braket.jobs_data.persisted_job_data\",\n \"version\": \"1\",\n },\n \"dataDictionary\": {\"converged\": True, \"energy\": -0.2},\n \"dataFormat\": \"plaintext\",\n }\n )\n )\n\n with tarfile.open(\"model.tar.gz\", \"w:gz\") as tar:\n tar.add(file_path, arcname=os.path.basename(file_path))\n\n yield\n\n result_dir = f\"{os.getcwd()}/{quantum_job_name}\"\n\n if os.path.exists(result_dir):\n os.remove(f\"{result_dir}/results.json\")\n os.rmdir(f\"{result_dir}/\")\n\n if os.path.isfile(\"model.tar.gz\"):\n os.remove(\"model.tar.gz\")\n\n os.chdir(\"..\")\n\n\n@pytest.mark.parametrize(\"state\", sorted(AwsQuantumJob.TERMINAL_STATES))\ndef test_results_when_job_is_completed(\n quantum_job, aws_session, generate_get_job_response, result_setup, state\n):\n expected_saved_data = {\"converged\": True, \"energy\": -0.2}\n\n get_job_response_completed = generate_get_job_response(status=state)\n quantum_job._aws_session.get_job.return_value = get_job_response_completed\n actual_data = quantum_job.result()\n\n job_metadata = quantum_job.metadata(True)\n s3_path = 
job_metadata[\"outputDataConfig\"][\"s3Path\"]\n\n output_bucket_uri = f\"{s3_path}/output/model.tar.gz\"\n quantum_job._aws_session.download_from_s3.assert_called_with(\n s3_uri=output_bucket_uri, filename=\"model.tar.gz\"\n )\n assert actual_data == expected_saved_data\n\n\ndef test_download_result_when_job_is_running(\n quantum_job, aws_session, generate_get_job_response, result_setup\n):\n poll_timeout_seconds, poll_interval_seconds, state = 1, 0.5, \"RUNNING\"\n get_job_response_completed = generate_get_job_response(status=state)\n aws_session.get_job.return_value = get_job_response_completed\n job_metadata = quantum_job.metadata(True)\n\n with pytest.raises(\n TimeoutError,\n match=f\"{job_metadata['jobName']}: Polling for job completion \"\n f\"timed out after {poll_timeout_seconds} seconds.\",\n ):\n quantum_job.download_result(\n poll_timeout_seconds=poll_timeout_seconds, poll_interval_seconds=poll_interval_seconds\n )\n\n\ndef test_download_result_when_extract_path_not_provided(\n quantum_job, generate_get_job_response, aws_session, result_setup\n):\n state = \"COMPLETED\"\n expected_saved_data = {\"converged\": True, \"energy\": -0.2}\n get_job_response_completed = generate_get_job_response(status=state)\n quantum_job._aws_session.get_job.return_value = get_job_response_completed\n job_metadata = quantum_job.metadata(True)\n job_name = job_metadata[\"jobName\"]\n quantum_job.download_result()\n\n with open(f\"{job_name}/results.json\", \"r\") as file:\n actual_data = json.loads(file.read())[\"dataDictionary\"]\n assert expected_saved_data == actual_data\n\n\ndef test_download_result_when_extract_path_provided(\n quantum_job, generate_get_job_response, aws_session, result_setup\n):\n expected_saved_data = {\"converged\": True, \"energy\": -0.2}\n state = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state)\n aws_session.get_job.return_value = get_job_response_completed\n job_metadata = quantum_job.metadata(True)\n job_name = job_metadata[\"jobName\"]\n\n with tempfile.TemporaryDirectory() as temp_dir:\n quantum_job.download_result(temp_dir)\n\n with open(f\"{temp_dir}/{job_name}/results.json\", \"r\") as file:\n actual_data = json.loads(file.read())[\"dataDictionary\"]\n assert expected_saved_data == actual_data\n\n\ndef test_empty_dict_returned_when_result_not_saved(\n quantum_job, generate_get_job_response, aws_session\n):\n state = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state)\n aws_session.get_job.return_value = get_job_response_completed\n\n exception_response = {\n \"Error\": {\n \"Code\": \"404\",\n \"Message\": \"Not Found\",\n }\n }\n quantum_job._aws_session.download_from_s3 = Mock(\n side_effect=ClientError(exception_response, \"HeadObject\")\n )\n assert quantum_job.result() == {}\n\n\ndef test_results_not_in_s3_for_download(quantum_job, generate_get_job_response, aws_session):\n state = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state)\n aws_session.get_job.return_value = get_job_response_completed\n job_metadata = quantum_job.metadata(True)\n output_s3_path = job_metadata[\"outputDataConfig\"][\"s3Path\"]\n\n error_message = f\"Error retrieving results, could not find results at '{output_s3_path}\"\n\n exception_response = {\n \"Error\": {\n \"Code\": \"404\",\n \"Message\": \"Not Found\",\n }\n }\n quantum_job._aws_session.download_from_s3 = Mock(\n side_effect=ClientError(exception_response, \"HeadObject\")\n )\n with pytest.raises(ClientError, 
match=error_message):\n quantum_job.download_result()\n\n\ndef test_results_raises_error_for_non_404_errors(\n quantum_job, generate_get_job_response, aws_session\n):\n state = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state)\n aws_session.get_job.return_value = get_job_response_completed\n\n error = \"An error occurred \\\\(402\\\\) when calling the SomeObject operation: Something\"\n\n exception_response = {\n \"Error\": {\n \"Code\": \"402\",\n \"Message\": \"Something\",\n }\n }\n quantum_job._aws_session.download_from_s3 = Mock(\n side_effect=ClientError(exception_response, \"SomeObject\")\n )\n with pytest.raises(ClientError, match=error):\n quantum_job.result()\n\n\n@patch(\"braket.aws.aws_quantum_job.AwsQuantumJob.download_result\")\ndef test_results_json_file_not_in_tar(\n result_download, quantum_job, aws_session, generate_get_job_response\n):\n state = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state)\n quantum_job._aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.result() == {}\n\n\n@pytest.fixture\ndef entry_point():\n return \"test-source-module.entry_point:func\"\n\n\n@pytest.fixture\ndef bucket():\n return \"braket-region-id\"\n\n\n@pytest.fixture(\n params=[\n None,\n \"aws.location/custom-jobs:tag.1.2.3\",\n \"other.uri/custom-name:tag\",\n \"other-custom-format.com\",\n ]\n)\ndef image_uri(request):\n return request.param\n\n\n@pytest.fixture(params=[\"given_job_name\", \"default_job_name\"])\ndef job_name(request):\n if request.param == \"given_job_name\":\n return \"test-job-name\"\n\n\n@pytest.fixture\ndef s3_prefix(job_name):\n return f\"{job_name}/non-default\"\n\n\n@pytest.fixture(params=[\"local_source\", \"s3_source\"])\ndef source_module(request, bucket, s3_prefix):\n if request.param == \"local_source\":\n return \"test-source-module\"\n elif request.param == \"s3_source\":\n return AwsSession.construct_s3_uri(bucket, \"test-source-prefix\", \"source.tar.gz\")\n\n\n@pytest.fixture\ndef role_arn():\n return \"arn:aws:iam::0000000000:role/AmazonBraketInternalSLR\"\n\n\n@pytest.fixture(\n params=[\n \"arn:aws:braket:us-west-2::device/qpu/test/device-name\",\n \"arn:aws:braket:::device/qpu/test/device-name\",\n ]\n)\ndef device_arn(request):\n return request.param\n\n\n@pytest.fixture\ndef reservation_arn():\n return \"arn:aws:braket:us-west-2:123456789123:reservation/a1b123cd-45e6-789f-gh01-i234567jk8l9\"\n\n\n@pytest.fixture\ndef prepare_job_args(aws_session, device_arn, reservation_arn):\n return {\n \"device\": device_arn,\n \"source_module\": Mock(),\n \"entry_point\": Mock(),\n \"image_uri\": Mock(),\n \"job_name\": Mock(),\n \"code_location\": Mock(),\n \"role_arn\": Mock(),\n \"hyperparameters\": Mock(),\n \"input_data\": Mock(),\n \"instance_config\": Mock(),\n \"distribution\": Mock(),\n \"stopping_condition\": Mock(),\n \"output_data_config\": Mock(),\n \"copy_checkpoints_from_job\": Mock(),\n \"checkpoint_config\": Mock(),\n \"aws_session\": aws_session,\n \"tags\": Mock(),\n \"reservation_arn\": reservation_arn,\n }\n\n\ndef test_str(quantum_job):\n expected = f\"AwsQuantumJob('arn':'{quantum_job.arn}')\"\n assert str(quantum_job) == expected\n\n\ndef test_arn(quantum_job_arn, aws_session):\n quantum_job = AwsQuantumJob(quantum_job_arn, aws_session)\n assert quantum_job.arn == quantum_job_arn\n\n\ndef test_name(quantum_job_arn, quantum_job_name, aws_session):\n quantum_job = AwsQuantumJob(quantum_job_arn, aws_session)\n assert quantum_job.name == 
quantum_job_name\n\n\ndef test_no_arn_setter(quantum_job):\n # Python 3.11 error output differs from Python 3.10 <=\n with pytest.raises(AttributeError):\n quantum_job.arn = 123\n\n\n@pytest.mark.parametrize(\"wait_until_complete\", [True, False])\n@patch(\"braket.aws.aws_quantum_job.AwsQuantumJob.logs\")\n@patch(\"braket.aws.aws_quantum_job.prepare_quantum_job\")\ndef test_create_job(\n mock_prepare_quantum_job,\n mock_logs,\n aws_session,\n prepare_job_args,\n quantum_job_arn,\n wait_until_complete,\n):\n test_response_args = {\"testArgs\": \"MyTestArg\"}\n mock_prepare_quantum_job.return_value = test_response_args\n job = AwsQuantumJob.create(wait_until_complete=wait_until_complete, **prepare_job_args)\n mock_prepare_quantum_job.assert_called_with(**prepare_job_args)\n aws_session.create_job.assert_called_with(**test_response_args)\n if wait_until_complete:\n mock_logs.assert_called_once()\n else:\n mock_logs.assert_not_called()\n assert job.arn == quantum_job_arn\n\n\ndef test_create_fake_arg():\n unexpected_kwarg = \"create\\\\(\\\\) got an unexpected keyword argument 'fake_arg'\"\n with pytest.raises(TypeError, match=unexpected_kwarg):\n AwsQuantumJob.create(\n device=\"device\",\n source_module=\"source\",\n fake_arg=\"fake_value\",\n )\n\n\ndef test_cancel_job(quantum_job_arn, aws_session, generate_cancel_job_response):\n cancellation_status = \"CANCELLING\"\n aws_session.cancel_job.return_value = generate_cancel_job_response(\n cancellationStatus=cancellation_status\n )\n quantum_job = AwsQuantumJob(quantum_job_arn, aws_session)\n status = quantum_job.cancel()\n aws_session.cancel_job.assert_called_with(quantum_job_arn)\n assert status == cancellation_status\n\n\ndef test_cancel_job_surfaces_exception(quantum_job, aws_session):\n exception_response = {\n \"Error\": {\n \"Code\": \"ValidationException\",\n \"Message\": \"unit-test-error\",\n }\n }\n error_string = re.escape(\n \"An error occurred (ValidationException) when calling the \"\n \"cancel_job operation: unit-test-error\"\n )\n aws_session.cancel_job.side_effect = ClientError(exception_response, \"cancel_job\")\n with pytest.raises(ClientError, match=error_string):\n quantum_job.cancel()\n\n\n@pytest.mark.parametrize(\n \"generate_get_job_response_kwargs\",\n [\n {\n \"status\": \"RUNNING\",\n },\n {\n \"status\": \"COMPLETED\",\n },\n {\n \"status\": \"COMPLETED\",\n \"startedAt\": datetime.datetime(2021, 1, 1, 1, 0, 0, 0),\n },\n {\"status\": \"COMPLETED\", \"endedAt\": datetime.datetime(2021, 1, 1, 1, 0, 0, 0)},\n {\n \"status\": \"COMPLETED\",\n \"startedAt\": datetime.datetime(2021, 1, 1, 1, 0, 0, 0),\n \"endedAt\": datetime.datetime(2021, 1, 1, 1, 0, 0, 0),\n },\n ],\n)\n@patch(\n \"braket.jobs.metrics_data.cwl_insights_metrics_fetcher.\"\n \"CwlInsightsMetricsFetcher.get_metrics_for_job\"\n)\ndef test_metrics(\n metrics_fetcher_mock,\n quantum_job,\n aws_session,\n generate_get_job_response,\n generate_get_job_response_kwargs,\n):\n get_job_response_running = generate_get_job_response(**generate_get_job_response_kwargs)\n aws_session.get_job.return_value = get_job_response_running\n\n expected_metrics = {\"Test\": [1]}\n metrics_fetcher_mock.return_value = expected_metrics\n metrics = quantum_job.metrics()\n assert metrics == expected_metrics\n\n\n@pytest.fixture\ndef log_stream_responses():\n return (\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n \"Message\": \"This shouldn't get raised...\",\n }\n },\n \"DescribeLogStreams\",\n ),\n {\"logStreams\": []},\n {\"logStreams\": 
[{\"logStreamName\": \"stream-1\"}]},\n )\n\n\n@pytest.fixture\ndef log_events_responses():\n return (\n {\"nextForwardToken\": None, \"events\": [{\"timestamp\": 1, \"message\": \"hi there #1\"}]},\n {\"nextForwardToken\": None, \"events\": []},\n {\n \"nextForwardToken\": None,\n \"events\": [\n {\"timestamp\": 1, \"message\": \"hi there #1\"},\n {\"timestamp\": 2, \"message\": \"hi there #2\"},\n ],\n },\n {\"nextForwardToken\": None, \"events\": []},\n {\n \"nextForwardToken\": None,\n \"events\": [\n {\"timestamp\": 2, \"message\": \"hi there #2\"},\n {\"timestamp\": 2, \"message\": \"hi there #2a\"},\n {\"timestamp\": 3, \"message\": \"hi there #3\"},\n ],\n },\n {\"nextForwardToken\": None, \"events\": []},\n )\n\n\ndef test_logs(\n quantum_job,\n generate_get_job_response,\n log_events_responses,\n log_stream_responses,\n capsys,\n):\n quantum_job._aws_session.get_job.side_effect = (\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"COMPLETED\"),\n )\n quantum_job._aws_session.describe_log_streams.side_effect = log_stream_responses\n quantum_job._aws_session.get_log_events.side_effect = log_events_responses\n\n quantum_job.logs(wait=True, poll_interval_seconds=0)\n\n captured = capsys.readouterr()\n assert captured.out == \"\\n\".join(\n (\n \"..\",\n \"hi there #1\",\n \"hi there #2\",\n \"hi there #2a\",\n \"hi there #3\",\n \"\",\n )\n )\n\n\n@patch.dict(\"os.environ\", {\"JPY_PARENT_PID\": \"True\"})\ndef test_logs_multiple_instances(\n quantum_job,\n generate_get_job_response,\n log_events_responses,\n log_stream_responses,\n capsys,\n):\n quantum_job._aws_session.get_job.side_effect = (\n generate_get_job_response(status=\"RUNNING\", instanceConfig={\"instanceCount\": 2}),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"COMPLETED\"),\n )\n log_stream_responses[-1][\"logStreams\"].append({\"logStreamName\": \"stream-2\"})\n quantum_job._aws_session.describe_log_streams.side_effect = log_stream_responses\n\n event_counts = {\n \"stream-1\": 0,\n \"stream-2\": 0,\n }\n\n def get_log_events(log_group, log_stream, start_time, start_from_head, next_token):\n log_events_dict = {\n \"stream-1\": log_events_responses,\n \"stream-2\": log_events_responses,\n }\n log_events_dict[\"stream-1\"] += (\n {\n \"nextForwardToken\": None,\n \"events\": [],\n },\n {\n \"nextForwardToken\": None,\n \"events\": [],\n },\n )\n log_events_dict[\"stream-2\"] += (\n {\n \"nextForwardToken\": None,\n \"events\": [\n {\"timestamp\": 3, \"message\": \"hi there #3\"},\n {\"timestamp\": 4, \"message\": \"hi there #4\"},\n ],\n },\n {\n \"nextForwardToken\": None,\n \"events\": [],\n },\n )\n event_counts[log_stream] += 1\n return log_events_dict[log_stream][event_counts[log_stream]]\n\n quantum_job._aws_session.get_log_events.side_effect = get_log_events\n\n quantum_job.logs(wait=True, poll_interval_seconds=0)\n\n captured = capsys.readouterr()\n assert captured.out == \"\\n\".join(\n (\n \"..\",\n \"\\x1b[34mhi there #1\\x1b[0m\",\n \"\\x1b[35mhi there #1\\x1b[0m\",\n \"\\x1b[34mhi there #2\\x1b[0m\",\n \"\\x1b[35mhi there #2\\x1b[0m\",\n \"\\x1b[34mhi there #2a\\x1b[0m\",\n \"\\x1b[35mhi there #2a\\x1b[0m\",\n \"\\x1b[34mhi there #3\\x1b[0m\",\n \"\\x1b[35mhi there #3\\x1b[0m\",\n \"\\x1b[35mhi there #4\\x1b[0m\",\n \"\",\n )\n )\n\n\ndef 
test_logs_error(quantum_job, generate_get_job_response, capsys):\n quantum_job._aws_session.get_job.side_effect = (\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"COMPLETED\"),\n )\n quantum_job._aws_session.describe_log_streams.side_effect = (\n ClientError(\n {\n \"Error\": {\n \"Code\": \"UnknownCode\",\n \"Message\": \"Some error message\",\n }\n },\n \"DescribeLogStreams\",\n ),\n )\n\n with pytest.raises(ClientError, match=\"Some error message\"):\n quantum_job.logs(wait=True, poll_interval_seconds=0)\n\n\ndef test_initialize_session_for_valid_non_regional_device(aws_session, caplog):\n device_arn = \"arn:aws:braket:::device/qpu/test/device-name\"\n first_region = aws_session.region\n logger = logging.getLogger(__name__)\n\n aws_session.get_device.side_effect = [\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n ),\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n ),\n device_arn,\n ]\n\n caplog.set_level(logging.INFO)\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n assert f\"Changed session region from '{first_region}' to '{aws_session.region}'\" in caplog.text\n\n\ndef test_initialize_session_for_valid_regional_device(aws_session, caplog):\n device_arn = f\"arn:aws:braket:{aws_session.region}::device/qpu/test/device-name\"\n logger = logging.getLogger(__name__)\n aws_session.get_device.return_value = device_arn\n caplog.set_level(logging.INFO)\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n assert not caplog.text\n\n\n@pytest.mark.parametrize(\n \"get_device_side_effect, expected_exception\",\n [\n (\n [\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n )\n ],\n ValueError,\n ),\n (\n [\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ThrottlingException\",\n }\n },\n \"getDevice\",\n )\n ],\n ClientError,\n ),\n ],\n)\ndef test_regional_device_raises_error(\n get_device_side_effect, expected_exception, aws_session, caplog\n):\n device_arn = f\"arn:aws:braket:{aws_session.region}::device/qpu/test/device-name\"\n aws_session.get_device.side_effect = get_device_side_effect\n logger = logging.getLogger(__name__)\n caplog.set_level(logging.INFO)\n with pytest.raises(expected_exception):\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n aws_session.get_device.assert_called_with(device_arn)\n assert not caplog.text\n\n\ndef test_regional_device_switches(aws_session, caplog):\n original_region = aws_session.region\n device_region = \"us-east-1\"\n device_arn = f\"arn:aws:braket:{device_region}::device/qpu/test/device-name\"\n mock_session = Mock()\n mock_session.get_device.side_effect = device_arn\n aws_session.copy_session.side_effect = [mock_session]\n logger = logging.getLogger(__name__)\n caplog.set_level(logging.INFO)\n\n assert mock_session == AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n aws_session.copy_session.assert_called_with(region=device_region)\n mock_session.get_device.assert_called_with(device_arn)\n assert f\"Changed session region from '{original_region}' to '{device_region}'\" in caplog.text\n\n\ndef test_initialize_session_for_invalid_device(aws_session, device_arn):\n logger = logging.getLogger(__name__)\n aws_session.get_device.side_effect = ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n 
\"getDevice\",\n )\n\n device_not_found = f\"'{device_arn}' not found.\"\n with pytest.raises(ValueError, match=device_not_found):\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n\ndef test_no_region_routing_simulator(aws_session):\n logger = logging.getLogger(__name__)\n\n aws_session.get_device.side_effect = ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n )\n\n device_arn = \"arn:aws:braket:::device/simulator/test/device-name\"\n device_not_found = f\"Simulator '{device_arn}' not found in 'us-west-2'\"\n with pytest.raises(ValueError, match=device_not_found):\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n\ndef test_exception_in_credentials_session_region(device_arn, aws_session):\n logger = logging.getLogger(__name__)\n\n aws_session.get_device.side_effect = ClientError(\n {\n \"Error\": {\n \"Code\": \"SomeOtherErrorMessage\",\n }\n },\n \"getDevice\",\n )\n\n error_message = (\n \"An error occurred \\\\(SomeOtherErrorMessage\\\\) \"\n \"when calling the getDevice operation: Unknown\"\n )\n with pytest.raises(ClientError, match=error_message):\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n\ndef test_exceptions_in_all_device_regions(aws_session):\n device_arn = \"arn:aws:braket:::device/qpu/test/device-name\"\n logger = logging.getLogger(__name__)\n\n aws_session.get_device.side_effect = [\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n ),\n ClientError(\n {\n \"Error\": {\n \"Code\": \"SomeOtherErrorMessage\",\n }\n },\n \"getDevice\",\n ),\n ]\n\n error_message = (\n \"An error occurred \\\\(SomeOtherErrorMessage\\\\) \"\n \"when calling the getDevice operation: Unknown\"\n )\n with pytest.raises(ClientError, match=error_message):\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n\n@patch(\"braket.aws.aws_quantum_job.AwsSession\")\ndef test_initialize_session_local_device(mock_new_session, aws_session):\n logger = logging.getLogger(__name__)\n device = \"local:provider.device.name\"\n # don't change a provided AwsSession\n assert AwsQuantumJob._initialize_session(aws_session, device, logger) == aws_session\n # otherwise, create an AwsSession with the profile defaults\n assert AwsQuantumJob._initialize_session(None, device, logger) == mock_new_session()\n\n\ndef test_bad_device_arn_format(aws_session):\n logger = logging.getLogger(__name__)\n device_not_found = (\n \"Device ARN is not a valid format: bad-arn-format. For valid Braket ARNs, \"\n \"see 'https://docs.aws.amazon.com/braket/latest/developerguide/braket-devices.html'\"\n )\n\n with pytest.raises(ValueError, match=device_not_found):\n AwsQuantumJob._initialize_session(aws_session, \"bad-arn-format\", logger)\n","repo_name":"amazon-braket/amazon-braket-sdk-python","sub_path":"test/unit_tests/braket/aws/test_aws_quantum_job.py","file_name":"test_aws_quantum_job.py","file_ext":"py","file_size_in_byte":34394,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"72"}
+{"seq_id":"25414352655","text":"# coding: utf-8\nfrom __future__ import division, print_function, unicode_literals\nimport sys\nimport re\nfrom operator import itemgetter\nimport codecs\nimport random\n\n\ndef set_encoding(enc='utf_8'):\n sys.stdin = codecs.getreader(enc)(sys.stdin)\n sys.stdout = codecs.getwriter(enc)(sys.stdout)\n sys.stderr = codecs.getwriter(enc)(sys.stderr)\n\n\ndef proc0():\n s = 'stressed'\n t = s[::-1]\n print('t = ' + t)\n\n\ndef proc1():\n s = 'パタトクカシーー'\n t = s[1::2]\n print('t = ' + t)\n\n\ndef proc2():\n s = 'パトカー'\n t = 'タクシー'\n u = ''.join(s[i] + t[i] for i in xrange(len(s)))\n print('u = ' + u)\n\n\ndef proc3():\n s = 'Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics.'\n ss = s.split(' ')\n t = [len(re.sub('\\W', '', w)) for w in ss]\n print('repr(t) = ' + repr(t))\n\n\ndef proc4():\n s = 'Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.'\n a = map(lambda x: x - 1, [1, 5, 6, 7, 8, 9, 15, 16, 19])\n m = {}\n for i, ss in enumerate(s.split(' ')):\n if i in a:\n m[ss[:1]] = i + 1\n else:\n m[ss[:2]] = i + 1\n l = [item for item in m.iteritems()]\n l.sort(key=itemgetter(1))\n print(repr(l))\n\n\ndef proc5():\n s = raw_input('> ')\n n = 2\n print('# word {}-gram #'.format(n))\n print(repr(word_n_gram(s, 2)))\n print('# char {}-gram #'.format(n))\n print(repr(char_n_gram(s, 2)))\n\n\ndef char_n_gram(s, n):\n return [s[i:i + n] for i in xrange(len(s) - n + 1)]\n\n\ndef word_n_gram(s, n, delim=' '):\n ws = s.split(delim)\n return [ws[i:i + n] for i in xrange(len(ws) - n + 1)]\n\n\ndef proc6():\n s = 'paraparaparadise'\n t = 'paragraph'\n X = set(char_n_gram(s, 2))\n Y = set(char_n_gram(t, 2))\n print('X & Y = ' + repr(X & Y))\n print('X | Y = ' + repr(X | Y))\n print('X - Y = ' + repr(X - Y))\n print('\"se\" in X = ' + repr(u\"se\" in X))\n print('\"se\" in Y = ' + repr(u\"se\" in Y))\n\n\ndef proc7():\n def fn(x, y, z):\n return '{}時の{}は{}'.format(x, y, z)\n\n print(fn(12, '気温', 22.4))\n\n\ndef proc8():\n text = raw_input('> ')\n\n def cipher(s):\n def repl(c):\n if re.match(r'\\w', c):\n return chr(219 - ord(c))\n return c\n\n return ''.join(repl(c) for c in s)\n\n print(cipher(text))\n\n\ndef proc9():\n text = raw_input('> ')\n\n def typoglycemia(s):\n ret = []\n for ss in s.split(' '):\n if len(ss) < 4:\n ret.append(ss)\n continue\n t = list(ss)[1:-1]\n random.shuffle(t)\n ret.append(ss[0] + ''.join(t) + ss[-1])\n return ' '.join(ret)\n\n print(typoglycemia(text))\n\n\ndef main():\n if len(sys.argv) < 2:\n print('usage: python {} NUM'.format(sys.argv[0]), file=sys.stderr)\n sys.exit(1)\n num = int(sys.argv[1])\n eval('proc{}()'.format(num))\n\n\nif __name__ == '__main__':\n set_encoding()\n main()\n","repo_name":"arosh/nlp100-2015","sub_path":"chapter1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18196334656","text":"# from..config import Config\n# from app.cache.redis_cache import Redis\n\n\n\n# static_file_cache\n# db_cache\n\nclass ModelCache:\n \"\"\"\n \"\"\"\n\n def __init__(self, model:object, cache_schema:object):\n \"\"\"\n \"\"\"\n model_name = model.__name__\n model_data = {}\n for model_row in model.query.all():\n model_data.update({\n str(model_row.id): model_row.to_json()\n })\n self.model = {\n model_name: model_data\n }\n print(self.model)\n","repo_name":"BonkaNyde/bat_cave","sub_path":"app/cache/cache_manager.py","file_name":"cache_manager.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36151689314","text":"import socket\nimport codecs\n\n\nclass ServerSocket:\n\n def __init__(self):\n self.runServer = True\n\n def initServer(self):\n HOST = 'localhost'\n PORT = 8080\n barcode = ''\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:\n print('\\nIniciando el servidor socket..')\n result = ''\n # 60 segs of utility, after that the socket died.\n server.settimeout(10)\n # If another socket are bound to the same address that this socket\n # gonna use, the old socket just go to BLOCKING MODE and let this\n # socket use the address. 👍\n try:\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n except Exception:\n pass\n server.bind((HOST, PORT))\n print(f'\\nconectado al host: { HOST }')\n print(f'conectado al puerto: { PORT }\\n')\n server.listen(1)\n try:\n conn, addr = server.accept()\n with conn:\n print('Conectado con: ', addr)\n while conn:\n data = conn.recv(4096)\n if data:\n conn.send(b'1')\n if not data:\n break\n result = codecs.decode(data)\n barcode = result\n print(f'Barcode: {result}')\n\n except socket.timeout:\n print('Socket server timeout')\n\n print('Servidor socket apagado.\\n')\n server.close()\n return barcode\n\n def disconnectServer(self):\n self.runServer = False\n","repo_name":"Pedro-Nicolas-Rios-Vargas/BCScript","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17144800209","text":"import numpy as np\nfrom typing import List, Tuple\nfrom dataclasses import dataclass\nfrom abc import ABC, abstractmethod\n\n\n@dataclass\nclass TrainingSample:\n \"\"\" Sample for training \"\"\"\n\n current_state: np.ndarray = np.array(\n []\n ) # set of images concatenated along axis-2 (h x w x n)\n action: int = -1 # action taken by network for current_state\n reward: float = 0 # reward rxd for action\n last_episode_state: bool = False # if true, episode ended because of action taken in the current state\n next_state: np.ndarray = np.array([]) # next state after taking action\n\n\nclass Network(ABC):\n \"\"\" Base class for all networks \"\"\"\n\n @abstractmethod\n def init(\n self,\n input_shape: Tuple[int],\n num_actions: int,\n discount_factor: float,\n tb_logdir: str,\n env_action_space: List[int],\n tb_writer,\n ):\n assert False\n\n @abstractmethod\n def predict(\n self,\n state: List[np.ndarray],\n convert_to_openai_action_space=True,\n predict_all_actions=False,\n ):\n \"\"\" if predict_all_actions is true, it returns the output of the network directly (np.ndarray(num_actions), else it returns a single number corresponding to the action with the largest Q value)\"\"\"\n assert False\n\n @abstractmethod\n def train(self, batch_idx: int, batch: List[TrainingSample]):\n assert False\n\n @abstractmethod\n def save(self, chkpt_folder, epi_cnt):\n \"\"\" saves the checkpoint \"\"\"\n assert False\n","repo_name":"acharyahemanth/RL-atari","sub_path":"networks/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"6760539411","text":"import logging\n\nfrom aiogram import Dispatcher\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import Message\nfrom tg_bot.dialog_states.questions_box_states import QuestionsBoxDialog\nfrom tg_bot.users import User, users\n\n\nclass QuestionsBoxService:\n def __init__(self):\n pass\n\n async def save_questions(self, message: Message, state: FSMContext):\n answer = message.text\n user = message.from_user.id\n\n if user not in users:\n users[user] = User(user_questions=[answer])\n\n else:\n users[user].user_questions.append(answer)\n\n await state.finish()\n\n await message.answer(text=f\"Answer saved {users[user].user_questions}\",\n parse_mode=\"HTML\")\n\n async def request_question(self, message: Message):\n await message.answer(text=\"Задайте вопрос\", parse_mode=\"HTML\")\n\n await QuestionsBoxDialog.question.set()\n","repo_name":"KuranovaPolina/suai-bot-student-telegram-client","sub_path":"tg_bot/handlers/questions_box.py","file_name":"questions_box.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"20685523467","text":"from flask import Flask, request, send_from_directory\r\nfrom flask_cors import CORS, cross_origin\r\n\r\napp = Flask(__name__, static_folder='GrammarChecker/build', static_url_path='')\r\nCORS(app)\r\n\r\n@app.route(\"/\")\r\n@cross_origin()\r\ndef serve():\r\n return send_from_directory(app.static_folder, 'index.html')\r\n\r\n@app.route(\"/incomingData\", methods=['POST'])\r\n@cross_origin()\r\ndef data():\r\n import spellcheck\r\n request_data=request.get_json()\r\n data=request_data['data']\r\n res=spellcheck.correct_word_spelling(data)\r\n ans=''\r\n for i in res:\r\n ans+=i\r\n ans+=\" \"\r\n return ans\r\n\r\nif __name__ == '__main__':\r\n app.run(host=\"0.0.0.0\", port=5000, debug=True, threaded=True)","repo_name":"NaK915/Spell-Checker","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10962163478","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 3 22:25:52 2020\r\n\r\n@author: Giovanni\r\n\"\"\"\r\n\r\n# Agrupamento com algoritmo K-medoids \r\n# esse algoritmo escolhe pontos reais ja existentes na base de dados \r\n\r\n#pip install pyclustering\r\n\r\nfrom sklearn import datasets\r\nfrom sklearn.metrics import confusion_matrix\r\nimport numpy as np\r\nfrom pyclustering.cluster.kmedoids import kmedoids \r\nfrom pyclustering.cluster import cluster_visualizer\r\n\r\niris = datasets.load_iris()\r\n\r\n# nos vamos trabalhar com o agrupamento dos atributos 0 e 1 \r\n# pois essa função de visualização so nos permite ver dois atributos \r\n\r\ncluster = kmedoids(iris.data[:,0:2], [3,12,20])\r\n # dados[todas as linhas, colunas 0 e 1 ]\r\n # [3,12,20] = initial_index_medoid = indice dos pontos na base de dados q vamos utilizar para indexação\r\n # é comum usar pontos aleatórios mesmo, nao faz muita diferença \r\ncluster.get_medoids() \r\n # Out[21]: [3, 12, 20] \r\ncluster.process()\r\n # a função process faz o treinamento, ou melhor, o agrupamento \r\nprevisoes = cluster.get_clusters()\r\n\r\nmedoids = cluster.get_medoids()\r\n # encontramos os pontos 7, 67 e 112, que são nossos verdadeiros medoids \r\n\r\n#Visualização do cluster:\r\nv = cluster_visualizer()\r\nv.append_clusters(previsoes, iris.data[:,0:2])\r\nv.append_cluster(medoids, iris.data[:,0:2], marker='*', markersize=15) # para marcar com estrela onde estão os medoids \r\nv.show()\r\n\r\n# Comparativo para ver os acertos: \r\n # Precisamos fazer uma coficação manual para conseguir gerar uma variavel no padrão do iris.target\r\n # para conseguir usar a confusion_matrix:\r\nlista_previsoes = []\r\nlista_real = []\r\nfor i in range(len(previsoes)):\r\n print('-----')\r\n print(i)\r\n print('-----')\r\n \r\n for j in range(len(previsoes[i])):\r\n #print(j)\r\n print(previsoes[i][j])\r\n lista_previsoes.append(i) \r\n lista_real.append(iris.target[previsoes[i][j]])\r\n \r\n# Transformando essas duas listas pro formato do numpy array\r\nlista_previsoes = np.asarray(lista_previsoes)\r\nlista_real = np.asarray(lista_real)\r\n\r\nresultados = confusion_matrix(lista_real, lista_previsoes)\r\n # contabilizamos 26 erros ","repo_name":"GiovanniBru/Data-Science","sub_path":"testes python/24_agrupamento_kmedoids.py","file_name":"24_agrupamento_kmedoids.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"41655679487","text":"# -*- coding: utf-8 -*-\nimport MySQLdb\nimport config\n\nclass JediMaster:\n def __init__(self):\n self.db = {}\n self.jedi = MySQLdb.connect(u\"localhost\", config.user, config.password)\n self.set_encoding(self.jedi)\n\n def get_database(self, database = None):\n if not database: return self.jedi\n if database not in self.db:\n self.db[database] = MySQLdb.connect(u\"localhost\", config.user, config.password, database)\n self.set_encoding(self.db[database])\n return self.db[database]\n\n def set_encoding(self, db):\n db.set_character_set('utf8')\n cursor = db.cursor()\n cursor.execute(u'SET NAMES utf8;')\n cursor.execute(u'SET CHARACTER SET utf8;')\n cursor.execute(u'SET character_set_connection=utf8;')\n\n def create_database(self, db):\n cursor = self.get_database().cursor()\n cursor.execute(u\"CREATE DATABASE IF NOT EXISTS {} DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci\".format(db))\n\n def create_table(self, database, table, columns):\n cursor = self.get_database(database).cursor()\n for i in xrange(len(columns)): columns[i] = u\" \".join(columns[i])\n columns = u\", \".join(columns)\n command = u\"CREATE TABLE IF NOT EXISTS {} ({})\".format(table, columns)\n cursor.execute(command)\n\n def insert_column(self, database, table, column):\n cursor = self.get_database(database).cursor()\n s = u\", \".join([u\"%s\"] * len(column))\n cursor.execute(u\"INSERT INTO {} VALUES ({})\".format(table, s), column)\n\n def insert_column_custom(self, database ,table, column):\n cursor = self.get_database(database).cursor()\n key = []\n value = []\n for k in column.keys():\n key.append(k)\n value.append(u\"%({})s\".format(k))\n key = \", \".join(key)\n value = \", \".join(value)\n cursor.execute(u\"INSERT INTO {} ({}) VALUES ({})\".format(table, key, value), column)\n\n def execute(self, database, query):\n cursor = self.get_database(database).cursor()\n cursor.execute(query)\n return cursor.fetchall()\n\n def commit(self, database):\n db = self.get_database(database)\n db.commit()\n","repo_name":"oalieno/JediSQL","sub_path":"JediSQL.py","file_name":"JediSQL.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"12861080206","text":"from collections import deque\n\ndef solution(skill, skill_trees):\n skillList = set(skill)\n cnt = 0\n\n for st in skill_trees:\n tmpStr = ''\n st = deque(list(st))\n while st: # 순서에 있는 스킬들만 뽑아서 찍어본다\n tmpChar = st.popleft()\n if tmpChar in skillList:\n tmpStr += tmpChar\n\n idx = 0\n while idx < len(tmpStr): # 내가 찍은 스킬트리와 스킬 순서를 앞부터 한글자 씩 비교\n if skill[idx] != tmpStr[idx]:\n break\n idx += 1\n else:\n cnt += 1\n\n return cnt\n\nif __name__ == \"__main__\":\n skill, skill_trees = \"CBD\", [\"BACDE\", \"CBADF\", \"AECB\", \"BDA\"]\n print(solution(skill, skill_trees)) # 2","repo_name":"LastCow9000/Algorithms","sub_path":"Algorithm/Programmers/스킬트리/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31767650967","text":"import numpy as np\n\nfrom tf_from_sql.db import SensorLogEntry, Session\n\n\ndef create_samples_from_db() -> tuple[np.ndarray, np.ndarray]:\n session = Session()\n session_log_entries = list(session.query(SensorLogEntry))\n\n num_sensors = 2 # sensors ids are in range(num_sensors)\n num_events = 3 # event ids are in range(num_events)\n\n # features\n X = np.zeros((len(session_log_entries), num_sensors), dtype=\"float32\")\n # labels\n Y_true = np.zeros((len(session_log_entries), num_events), dtype=\"float32\")\n for i, session_log_entry in enumerate(session_log_entries):\n # one-hot encoding of features and labels\n X[i, session_log_entry.sensor] = 1.0\n Y_true[i, session_log_entry.event] = 1.0\n\n return X, Y_true\n","repo_name":"AlexElvers/tf-from-sql","sub_path":"tf_from_sql/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"38842273050","text":"from flask import Flask, jsonify\nfrom flask import request\nfrom Bio import Entrez\nfrom spacy.matcher import Matcher\nimport spacy\nfrom age_matcher import get_age\nfrom analyte_matcher import load_analytes\nfrom analyte_matcher import get_analytes\nfrom analyte_matcher import createMatcher\nfrom omic_matcher import createMetaboliteMatcher\nfrom control_group_matcher import get_control_groups, get_healthy_control_groups\nfrom fluid_matcher import get_fluids\nfrom n_matcher import get_n\nfrom omic_matcher import get_omics, load_metabolites\nfrom sex_matcher import get_sexes\n\nnlp = spacy.load(\"en_core_web_trf\")\ntest = spacy.load(\"en_ner_bionlp13cg_md\")\n\nemail = ''\nmetabolite_list = load_metabolites('metabolites.csv')\nanalyte_list = load_analytes('analytes.csv')\nanaylte_matcher = createMatcher(nlp, analyte_list)\nmetabolite_matcher = createMetaboliteMatcher(nlp, metabolite_list)\n\napp = Flask(__name__)\n\n@app.route('/paper')\ndef papers():\n doi = request.args.get('doi')\n pmid = request.args.get('pmid')\n Entrez.email = email\n if(doi):\n handle = Entrez.esearch(db=\"pubmed\", term=doi, retmax=100)\n record = Entrez.read(handle)\n id = record['IdList']\n handle.close()\n else:\n id = pmid\n handle = Entrez.elink(dbfrom=\"pubmed\", db=\"pmc\", linkname=\"pubmed_pmc\", id=''.join(id), retmode=\"xml\")\n id_return = Entrez.read(handle)\n handle.close()\n handle = Entrez.efetch(db=\"pmc\", id=id_return[0]['LinkSetDb'][0]['Link'][0]['Id']) \n records = handle.read()\n handle.close()\n results = []\n text = nlp(records.decode(\"utf-8\").replace('\\n', ' '))\n potential_n = get_n(nlp, text)\n umlstext = test(text)\n potential_sexes = get_sexes(nlp, text)\n potential_fluids = get_fluids(nlp, text)\n potential_omics = get_omics(nlp, text)\n potential_ages = get_age(nlp, text)\n potential_control_groups = get_control_groups(nlp, text)\n potential_healthy_control_groups = get_healthy_control_groups(nlp, text)\n potential_analytes = get_analytes(nlp, text, analyte_list)\n results.append({\n 'doi': doi,'input': [t.text for t in text], \n 'size': [{'start':item.start, 'end': item.end} for item in potential_n], \n 'fluids':[{'start':item.start, 'end': item.end} for item in potential_fluids], \n 'sexes':[{'start':item.start, 'end': item.end} for item in potential_sexes], \n 'ages':[{'start':item.start, 'end': item.end} for item in potential_ages],\n 'omics':[{'start':item.start, 'end': item.end} for item in potential_omics],\n 'controlGroups': [{'start':item.start, 'end': item.end} for item in potential_control_groups],\n 'healthyControlGroups': [{'start':item.start, 'end': item.end} for item in potential_healthy_control_groups],\n 'analytes': [{'start':item.start, 'end': item.end} for item in potential_analytes],\n 'umls': [{'start': item.start, 'end': item.end, 'label': item.label_, 'text': item.text} for item in umlstext.ents],\n })\n return '
'.join(results)\n\n@app.route(\"/abstract\")\ndef entrance():\n doi = request.args.get('doi')\n pmid = request.args.get('pmid')\n print(pmid, flush=True)\n dois = str(doi).split(',')\n abstract_dict = {}\n without_abstract = []\n Entrez.email = email\n if (doi):\n handle = Entrez.esearch(db=\"pubmed\", term=' OR '.join(dois), retmax=100)\n record = Entrez.read(handle)\n ids = record['IdList']\n handle.close()\n else:\n ids = pmid.split(',')\n handle = Entrez.efetch(db=\"pubmed\", id=','.join(ids),rettype=\"xml\", retmode=\"text\")\n records = Entrez.read(handle)\n for pubmed_article in records['PubmedArticle']:\n pmid = int(str(pubmed_article['MedlineCitation']['PMID']))\n article = pubmed_article['MedlineCitation']['Article']\n if 'Abstract' in article:\n abstract = ' '.join(article['Abstract']['AbstractText']).replace(',', '')\n abstract_dict[pmid] = abstract.encode(\"ascii\", \"ignore\").decode()\n else:\n without_abstract.append(pmid)\n handle.close()\n results = []\n for key, abstract in abstract_dict.items():\n text = nlp(abstract)\n umlstext = test(abstract)\n potential_n = get_n(nlp, text)\n potential_sexes = get_sexes(nlp, text)\n potential_fluids = get_fluids(nlp, text)\n potential_omics = get_omics(nlp, text, metabolite_matcher)\n potential_ages = get_age(nlp, text)\n potential_control_groups = get_control_groups(nlp, text)\n potential_healthy_control_groups = get_healthy_control_groups(nlp, text)\n potential_analytes = get_analytes(text, anaylte_matcher)\n results.append({\n 'doi': key,'input': [t.text for t in text], \n 'size': [{'start':item.start, 'end': item.end} for item in potential_n], \n 'fluids':[{'start':item.start, 'end': item.end} for item in potential_fluids], \n 'sexes':[{'start':item.start, 'end': item.end} for item in potential_sexes], \n 'ages':[{'start':item.start, 'end': item.end} for item in potential_ages],\n 'omics':[{'start':item.start, 'end': item.end} for item in potential_omics],\n 'controlGroups': [{'start':item.start, 'end': item.end} for item in potential_control_groups],\n 'healthyControlGroups': [{'start':item.start, 'end': item.end} for item in potential_healthy_control_groups],\n 'analytes': [{'start':item.start, 'end': item.end} for item in potential_analytes],\n 'umls': [{'start': item.start, 'end': item.end, 'label': item.label_, 'text': item.text} for item in umlstext.ents],\n })\n if (len(abstract_dict.items()) > 0):\n response = jsonify(results)\n response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n return response\n else:\n return 'no abstract
'\n\n\nif __name__ == '__main__':\n print('loading')\n app.run(port=9090)","repo_name":"MatthewMong/paperAnalyzer","sub_path":"extraResources/backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
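A hedged client sketch for the `/abstract` endpoint above; it assumes the service is running locally on port 9090, and the PMID is a placeholder:

```python
import requests

resp = requests.get("http://localhost:9090/abstract", params={"pmid": "12345678"})
if resp.headers.get("Content-Type", "").startswith("application/json"):
    for paper in resp.json():
        print(paper["doi"], len(paper["analytes"]), "analyte spans")
else:
    print(resp.text)  # the 'no abstract' fallback
```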
+{"seq_id":"6713972905","text":"# Create a library class\n# display book\n# lend book - (who owns the book if not present)\n# add book\n# return book\n\n# SauravLibrary = Library (Listofbooks, library_name)\n\n# dictionary (books-nameofperson)\n\n# create a main function and run an infinite while loop asking\n# users for their input\n\nclass Library:\n def __init__(self,booklist,name):\n self.booklist=booklist\n self.name=name\n self.lenddict={}\n def displaybook(self):\n print(f\"\\nAll available books in our {self.name} library :\\b\")\n for book in self.booklist:\n print(book)\n def lendbook(self,user,book):\n if book not in self.lenddict and book in self.booklist:\n self.lenddict[book]=user\n self.booklist.remove(book)\n print(f\"\\n{book} is now given to {user}\")\n else:\n if book in self.lenddict:\n print(f\"\\n{book} is not available because it is lended by {self.lenddict[book]}\")\n else:\n print(f\"\\n{book} is not available in {self.name} Library\")\n def addbook(self,book):\n self.booklist.append(book)\n print(f\"\\n{book} has been added to {self.name} Library\")\n def returnbook(self,book):\n if book in self.lenddict:\n self.lenddict.pop(book)\n self.booklist.append(book)\n print(f\"\\n{book} has been added to {self.name} Library\")\n else:\n print(f\"\\n{book} has not been lended by anyone till now\")\n\nif __name__ == '__main__':\n saurav = Library([\"c++\", \"csa\", \"java\", \"dsa\", \"python\", \"php\"], \"saurav\")\n print(\"\\nWelcome to codewithsaurav Library !\")\n choice='y'\n while(choice==\"y\" or choice==\"Y\"):\n print(\"\\npress 1 : To display all books \")\n print(\"press 2 : To lend books\")\n print(\"press 3 : To add book\")\n print(\"press 4 : To return book\")\n user_inp=input(\"Enter your choice : \")\n if user_inp not in ['1','2','3','4']:\n print(\"\\nplease enter a valid option !\\n\")\n continue\n else:\n user_inp=int(user_inp)\n if user_inp==1:\n saurav.displaybook()\n elif user_inp==2:\n name=input(\"\\nEnter the name of the person who wants to lend a book : \")\n book=input(\"Enter the name of the book you want to lend : \")\n saurav.lendbook(name,book)\n elif user_inp==3:\n book = input(\"\\nEnter the book you want to add : \")\n saurav.addbook(book)\n else:\n book=input(\"\\nEnter the book you want to return : \")\n saurav.returnbook(book)\n choice=input(\"\\npress y or Y to continue or any other key to exit : \")\n\n","repo_name":"sauravganguly2018/Python_Tutorials","sub_path":"mini_proj1.py","file_name":"mini_proj1.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"37973308989","text":"from typing import TYPE_CHECKING\n\nfrom trezor import io, loop, ui\nfrom trezor.enums import ButtonRequestType\nfrom trezor.wire import ActionCancelled\n\nimport trezorui2\n\nfrom ..common import button_request, interact\n\nif TYPE_CHECKING:\n from typing import Any, NoReturn, Awaitable, Iterable, Sequence, TypeVar\n\n from trezor.wire import GenericContext, Context\n from ..common import PropertyType, ExceptionType\n\n T = TypeVar(\"T\")\n\n\nCONFIRMED = trezorui2.CONFIRMED\nCANCELLED = trezorui2.CANCELLED\nINFO = trezorui2.INFO\n\nBR_TYPE_OTHER = ButtonRequestType.Other # global_import_cache\n\n\nif __debug__:\n from trezor.utils import DISABLE_ANIMATION\n\n trezorui2.disable_animation(bool(DISABLE_ANIMATION))\n\n\nclass RustLayout(ui.Layout):\n # pylint: disable=super-init-not-called\n def __init__(self, layout: Any):\n self.layout = layout\n self.timer = loop.Timer()\n self.layout.attach_timer_fn(self.set_timer)\n\n def set_timer(self, token: int, deadline: int) -> None:\n self.timer.schedule(deadline, token)\n\n def request_complete_repaint(self) -> None:\n msg = self.layout.request_complete_repaint()\n assert msg is None\n\n def _paint(self) -> None:\n import storage.cache as storage_cache\n\n painted = self.layout.paint()\n\n ui.refresh()\n if storage_cache.homescreen_shown is not None and painted:\n storage_cache.homescreen_shown = None\n\n if __debug__:\n from trezor.enums import DebugPhysicalButton\n\n def create_tasks(self) -> tuple[loop.AwaitableTask, ...]:\n return (\n self.handle_input_and_rendering(),\n self.handle_timers(),\n self.handle_swipe_signal(),\n self.handle_button_signal(),\n self.handle_result_signal(),\n )\n\n async def handle_result_signal(self) -> None:\n \"\"\"Enables sending arbitrary input - ui.Result.\n\n Waits for `result_signal` and carries it out.\n \"\"\"\n from apps.debug import result_signal\n from storage import debug as debug_storage\n\n while True:\n event_id, result = await result_signal()\n # Layout change will be notified in _first_paint of the next layout\n debug_storage.new_layout_event_id = event_id\n raise ui.Result(result)\n\n def read_content_into(self, content_store: list[str]) -> None:\n \"\"\"Reads all the strings/tokens received from Rust into given list.\"\"\"\n\n def callback(*args: Any) -> None:\n for arg in args:\n content_store.append(str(arg))\n\n content_store.clear()\n self.layout.trace(callback)\n\n async def _press_left(self, hold_ms: int | None) -> Any:\n \"\"\"Triggers left button press.\"\"\"\n self.layout.button_event(io.BUTTON_PRESSED, io.BUTTON_LEFT)\n self._paint()\n if hold_ms is not None:\n await loop.sleep(hold_ms)\n return self.layout.button_event(io.BUTTON_RELEASED, io.BUTTON_LEFT)\n\n async def _press_right(self, hold_ms: int | None) -> Any:\n \"\"\"Triggers right button press.\"\"\"\n self.layout.button_event(io.BUTTON_PRESSED, io.BUTTON_RIGHT)\n self._paint()\n if hold_ms is not None:\n await loop.sleep(hold_ms)\n return self.layout.button_event(io.BUTTON_RELEASED, io.BUTTON_RIGHT)\n\n async def _press_middle(self, hold_ms: int | None) -> Any:\n \"\"\"Triggers middle button press.\"\"\"\n self.layout.button_event(io.BUTTON_PRESSED, io.BUTTON_LEFT)\n self._paint()\n self.layout.button_event(io.BUTTON_PRESSED, io.BUTTON_RIGHT)\n self._paint()\n if hold_ms is not None:\n await loop.sleep(hold_ms)\n self.layout.button_event(io.BUTTON_RELEASED, io.BUTTON_LEFT)\n self._paint()\n return self.layout.button_event(io.BUTTON_RELEASED, io.BUTTON_RIGHT)\n\n async def _press_button(\n 
self,\n event_id: int | None,\n btn_to_press: DebugPhysicalButton,\n hold_ms: int | None,\n ) -> Any:\n from trezor.enums import DebugPhysicalButton\n from trezor import workflow\n from apps.debug import notify_layout_change\n from storage import debug as debug_storage\n\n if btn_to_press == DebugPhysicalButton.LEFT_BTN:\n msg = await self._press_left(hold_ms)\n elif btn_to_press == DebugPhysicalButton.MIDDLE_BTN:\n msg = await self._press_middle(hold_ms)\n elif btn_to_press == DebugPhysicalButton.RIGHT_BTN:\n msg = await self._press_right(hold_ms)\n else:\n raise Exception(f\"Unknown button: {btn_to_press}\")\n\n if msg is not None:\n # Layout change will be notified in _first_paint of the next layout\n debug_storage.new_layout_event_id = event_id\n raise ui.Result(msg)\n\n # So that these presses will keep trezor awake\n # (it will not be locked after auto_lock_delay_ms)\n workflow.idle_timer.touch()\n\n self._paint()\n notify_layout_change(self, event_id)\n\n async def _swipe(self, event_id: int | None, direction: int) -> None:\n \"\"\"Triggers swipe in the given direction.\n\n Only `UP` and `DOWN` directions are supported.\n \"\"\"\n from trezor.enums import DebugPhysicalButton, DebugSwipeDirection\n\n if direction == DebugSwipeDirection.UP:\n btn_to_press = DebugPhysicalButton.RIGHT_BTN\n elif direction == DebugSwipeDirection.DOWN:\n btn_to_press = DebugPhysicalButton.LEFT_BTN\n else:\n raise Exception(f\"Unsupported direction: {direction}\")\n\n await self._press_button(event_id, btn_to_press, None)\n\n async def handle_swipe_signal(self) -> None:\n \"\"\"Enables pagination through the current page/flow page.\n\n Waits for `swipe_signal` and carries it out.\n \"\"\"\n from apps.debug import swipe_signal\n\n while True:\n event_id, direction = await swipe_signal()\n await self._swipe(event_id, direction)\n\n async def handle_button_signal(self) -> None:\n \"\"\"Enables clicking arbitrary of the three buttons.\n\n Waits for `button_signal` and carries it out.\n \"\"\"\n from apps.debug import button_signal\n\n while True:\n event_id, btn, hold_ms = await button_signal()\n await self._press_button(event_id, btn, hold_ms)\n\n else:\n\n def create_tasks(self) -> tuple[loop.AwaitableTask, ...]:\n return self.handle_timers(), self.handle_input_and_rendering()\n\n def _first_paint(self) -> None:\n # Clear the screen of any leftovers.\n ui.display.clear()\n self._paint()\n\n if __debug__ and self.should_notify_layout_change:\n from apps.debug import notify_layout_change\n from storage import debug as debug_storage\n\n # notify about change and do not notify again until next await.\n # (handle_rendering might be called multiple times in a single await,\n # because of the endless loop in __iter__)\n self.should_notify_layout_change = False\n\n # Possibly there is an event ID that caused the layout change,\n # so notifying with this ID.\n event_id = None\n if debug_storage.new_layout_event_id is not None:\n event_id = debug_storage.new_layout_event_id\n debug_storage.new_layout_event_id = None\n\n notify_layout_change(self, event_id)\n\n def handle_input_and_rendering(self) -> loop.Task: # type: ignore [awaitable-is-generator]\n from trezor import workflow\n\n button = loop.wait(io.BUTTON)\n self._first_paint()\n while True:\n # Using `yield` instead of `await` to avoid allocations.\n event, button_num = yield button\n workflow.idle_timer.touch()\n msg = None\n if event in (io.BUTTON_PRESSED, io.BUTTON_RELEASED):\n msg = self.layout.button_event(event, button_num)\n if msg is not None:\n raise 
ui.Result(msg)\n self._paint()\n\n def handle_timers(self) -> loop.Task: # type: ignore [awaitable-is-generator]\n while True:\n # Using `yield` instead of `await` to avoid allocations.\n token = yield self.timer\n msg = self.layout.timer(token)\n if msg is not None:\n raise ui.Result(msg)\n self._paint()\n\n def page_count(self) -> int:\n \"\"\"How many paginated pages current screen has.\"\"\"\n return self.layout.page_count()\n\n\ndef draw_simple(layout: Any) -> None:\n # Simple drawing not supported for layouts that set timers.\n def dummy_set_timer(token: int, deadline: int) -> None:\n raise RuntimeError\n\n layout.attach_timer_fn(dummy_set_timer)\n ui.display.clear()\n layout.paint()\n ui.refresh()\n\n\n# Temporary function, so we know where it is used\n# Should be gradually replaced by custom designs/layouts\nasync def _placeholder_confirm(\n ctx: GenericContext,\n br_type: str,\n title: str,\n data: str | None = None,\n description: str | None = None,\n *,\n verb: str = \"CONFIRM\",\n verb_cancel: str | None = \"\",\n hold: bool = False,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> Any:\n return await confirm_action(\n ctx,\n br_type,\n title.upper(),\n data,\n description,\n verb=verb,\n verb_cancel=verb_cancel,\n hold=hold,\n reverse=True,\n br_code=br_code,\n )\n\n\nasync def get_bool(\n ctx: GenericContext,\n br_type: str,\n title: str,\n data: str | None = None,\n description: str | None = None,\n verb: str = \"CONFIRM\",\n verb_cancel: str | None = \"\",\n hold: bool = False,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> bool:\n result = await interact(\n ctx,\n RustLayout(\n trezorui2.confirm_action(\n title=title.upper(),\n action=data,\n description=description,\n verb=verb,\n verb_cancel=verb_cancel,\n hold=hold,\n )\n ),\n br_type,\n br_code,\n )\n\n return result is CONFIRMED\n\n\nasync def raise_if_not_confirmed(a: Awaitable[T], exc: Any = ActionCancelled) -> T:\n result = await a\n if result is not CONFIRMED:\n raise exc\n return result\n\n\nasync def confirm_action(\n ctx: GenericContext,\n br_type: str,\n title: str,\n action: str | None = None,\n description: str | None = None,\n description_param: str | None = None,\n verb: str = \"CONFIRM\",\n verb_cancel: str | None = \"\",\n hold: bool = False,\n hold_danger: bool = False,\n reverse: bool = False,\n exc: ExceptionType = ActionCancelled,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> None:\n if verb_cancel is not None:\n verb_cancel = verb_cancel.upper()\n\n if description is not None and description_param is not None:\n description = description.format(description_param)\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_action(\n title=title.upper(),\n action=action,\n description=description,\n verb=verb.upper(),\n verb_cancel=verb_cancel,\n hold=hold,\n reverse=reverse,\n )\n ),\n br_type,\n br_code,\n ),\n exc,\n )\n\n\nasync def confirm_single(\n ctx: GenericContext,\n br_type: str,\n title: str,\n description: str,\n description_param: str | None = None,\n verb: str | None = None,\n) -> None:\n description_param = description_param or \"\"\n begin, _separator, end = description.partition(\"{}\")\n await confirm_action(\n ctx,\n br_type,\n title,\n description=begin + description_param + end,\n verb=verb or \"CONFIRM\",\n br_code=ButtonRequestType.ProtectCall,\n )\n\n\nasync def confirm_reset_device(\n ctx: GenericContext,\n title: str,\n recovery: bool = False,\n) -> None:\n if recovery:\n button = \"RECOVER WALLET\"\n else:\n button = \"CREATE 
WALLET\"\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_reset_device(\n title=title.upper(),\n button=button,\n )\n ),\n \"recover_device\" if recovery else \"setup_device\",\n ButtonRequestType.ProtectCall\n if recovery\n else ButtonRequestType.ResetDevice,\n )\n )\n\n\n# TODO cleanup @ redesign\nasync def confirm_backup(ctx: GenericContext) -> bool:\n if await get_bool(\n ctx,\n \"backup_device\",\n \"SUCCESS\",\n description=\"New wallet has been created.\\nIt should be backed up now!\",\n verb=\"BACK UP\",\n verb_cancel=\"SKIP\",\n br_code=ButtonRequestType.ResetDevice,\n ):\n return True\n\n return await get_bool(\n ctx,\n \"backup_device\",\n \"WARNING\",\n \"Are you sure you want to skip the backup?\\n\",\n \"You can back up your Trezor once, at any time.\",\n verb=\"BACK UP\",\n verb_cancel=\"SKIP\",\n br_code=ButtonRequestType.ResetDevice,\n )\n\n\nasync def confirm_path_warning(\n ctx: GenericContext,\n path: str,\n path_type: str | None = None,\n) -> None:\n if path_type:\n title = f\"Unknown {path_type}\"\n else:\n title = \"Unknown path\"\n return await _placeholder_confirm(\n ctx,\n \"path_warning\",\n title.upper(),\n description=path,\n br_code=ButtonRequestType.UnknownDerivationPath,\n )\n\n\nasync def confirm_homescreen(\n ctx: GenericContext,\n image: bytes,\n) -> None:\n # TODO: show homescreen preview?\n await confirm_action(\n ctx,\n \"set_homescreen\",\n \"Set homescreen\",\n description=\"Do you really want to set new homescreen image?\",\n br_code=ButtonRequestType.ProtectCall,\n )\n\n\ndef _show_xpub(xpub: str, title: str, cancel: str | None) -> ui.Layout:\n return RustLayout(\n trezorui2.confirm_blob(\n title=title.upper(),\n data=xpub,\n verb_cancel=cancel,\n description=None,\n extra=None,\n )\n )\n\n\nasync def show_xpub(ctx: GenericContext, xpub: str, title: str) -> None:\n await raise_if_not_confirmed(\n interact(\n ctx,\n _show_xpub(xpub, title, None),\n \"show_xpub\",\n ButtonRequestType.PublicKey,\n )\n )\n\n\nasync def show_address(\n ctx: GenericContext,\n address: str,\n *,\n address_qr: str | None = None,\n case_sensitive: bool = True,\n path: str | None = None,\n account: str | None = None,\n network: str | None = None,\n multisig_index: int | None = None,\n xpubs: Sequence[str] = (),\n) -> None:\n send_button_request = True\n # Will be a marquee in case of multisig\n title = (\n \"RECEIVE ADDRESS (MULTISIG)\"\n if multisig_index is not None\n else \"RECEIVE ADDRESS\"\n )\n while True:\n layout = RustLayout(\n trezorui2.confirm_address(\n title=title,\n data=address,\n description=\"\", # unused on TR\n extra=None, # unused on TR\n )\n )\n if send_button_request:\n send_button_request = False\n await button_request(\n ctx,\n \"show_address\",\n ButtonRequestType.Address,\n pages=layout.page_count(),\n )\n result = await ctx.wait(layout)\n\n # User confirmed with middle button.\n if result is CONFIRMED:\n break\n\n # User pressed right button, go to address details.\n elif result is INFO:\n\n def xpub_title(i: int) -> str:\n # Will be marquee (cannot fit one line)\n result = f\"MULTISIG XPUB #{i + 1}\"\n result += \" (YOURS)\" if i == multisig_index else \" (COSIGNER)\"\n return result\n\n result = await ctx.wait(\n RustLayout(\n trezorui2.show_address_details(\n address=address if address_qr is None else address_qr,\n case_sensitive=case_sensitive,\n account=account,\n path=path,\n xpubs=[(xpub_title(i), xpub) for i, xpub in enumerate(xpubs)],\n )\n ),\n )\n # Can only go back from the address details.\n assert 
result is CANCELLED\n\n # User pressed left cancel button, show mismatch dialogue.\n else:\n result = await ctx.wait(RustLayout(trezorui2.show_mismatch()))\n assert result in (CONFIRMED, CANCELLED)\n # Right button aborts action, left goes back to showing address.\n if result is CONFIRMED:\n raise ActionCancelled\n\n\ndef show_pubkey(\n ctx: Context, pubkey: str, title: str = \"Confirm public key\"\n) -> Awaitable[None]:\n return confirm_blob(\n ctx,\n \"show_pubkey\",\n title.upper(),\n pubkey,\n br_code=ButtonRequestType.PublicKey,\n )\n\n\nasync def _show_modal(\n ctx: GenericContext,\n br_type: str,\n header: str,\n subheader: str | None,\n content: str,\n button_confirm: str | None,\n button_cancel: str | None,\n br_code: ButtonRequestType,\n exc: ExceptionType = ActionCancelled,\n) -> None:\n await confirm_action(\n ctx,\n br_type,\n header.upper(),\n subheader,\n content,\n verb=button_confirm or \"\",\n verb_cancel=button_cancel,\n exc=exc,\n br_code=br_code,\n )\n\n\nasync def show_error_and_raise(\n ctx: GenericContext,\n br_type: str,\n content: str,\n header: str = \"Error\",\n subheader: str | None = None,\n button: str = \"Close\",\n red: bool = False, # unused on TR\n exc: ExceptionType = ActionCancelled,\n) -> NoReturn:\n await _show_modal(\n ctx,\n br_type,\n header,\n subheader,\n content,\n button_confirm=None,\n button_cancel=button,\n br_code=BR_TYPE_OTHER,\n exc=exc,\n )\n raise exc\n\n\ndef show_warning(\n ctx: GenericContext,\n br_type: str,\n content: str,\n subheader: str | None = None,\n button: str = \"Try again\",\n br_code: ButtonRequestType = ButtonRequestType.Warning,\n) -> Awaitable[None]:\n return _show_modal(\n ctx,\n br_type,\n \"\",\n subheader or \"WARNING\",\n content,\n button_confirm=button,\n button_cancel=None,\n br_code=br_code,\n )\n\n\ndef show_success(\n ctx: GenericContext,\n br_type: str,\n content: str,\n subheader: str | None = None,\n button: str = \"Continue\",\n) -> Awaitable[None]:\n title = \"Success\"\n\n # In case only subheader is supplied, showing it\n # in regular font, not bold.\n if not content and subheader:\n content = subheader\n subheader = None\n\n # Special case for Shamir backup - to show everything just on one page\n # in regular font.\n if \"Continue with\" in content:\n content = f\"{subheader}\\n{content}\"\n subheader = None\n title = \"\"\n\n return _show_modal(\n ctx,\n br_type,\n title,\n subheader,\n content,\n button_confirm=button,\n button_cancel=None,\n br_code=ButtonRequestType.Success,\n )\n\n\nasync def confirm_output(\n ctx: GenericContext,\n address: str,\n amount: str,\n title: str = \"Confirm sending\",\n hold: bool = False,\n br_code: ButtonRequestType = ButtonRequestType.ConfirmOutput,\n address_label: str | None = None,\n output_index: int | None = None,\n) -> None:\n address_title = (\n \"RECIPIENT\" if output_index is None else f\"RECIPIENT #{output_index + 1}\"\n )\n amount_title = \"AMOUNT\" if output_index is None else f\"AMOUNT #{output_index + 1}\"\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_output(\n address=address,\n address_label=address_label or \"\",\n address_title=address_title,\n amount_title=amount_title,\n amount=amount,\n )\n ),\n \"confirm_output\",\n br_code,\n )\n )\n\n\nasync def tutorial(\n ctx: GenericContext,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> None:\n \"\"\"Showing users how to interact with the device.\"\"\"\n await interact(\n ctx,\n RustLayout(trezorui2.tutorial()),\n \"tutorial\",\n br_code,\n )\n\n\nasync def 
confirm_payment_request(\n ctx: GenericContext,\n recipient_name: str,\n amount: str,\n memos: list[str],\n) -> Any:\n memos_str = \"\\n\".join(memos)\n return await _placeholder_confirm(\n ctx,\n \"confirm_payment_request\",\n \"CONFIRM SENDING\",\n description=f\"{amount} to\\n{recipient_name}\\n{memos_str}\",\n br_code=ButtonRequestType.ConfirmOutput,\n )\n\n\nasync def should_show_more(\n ctx: GenericContext,\n title: str,\n para: Iterable[tuple[int, str]],\n button_text: str = \"Show all\",\n br_type: str = \"should_show_more\",\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n confirm: str | bytes | None = None,\n) -> bool:\n \"\"\"Return True if the user wants to show more (they click a special button)\n and False when the user wants to continue without showing details.\n\n Raises ActionCancelled if the user cancels.\n \"\"\"\n if confirm is None or not isinstance(confirm, str):\n confirm = \"CONFIRM\"\n\n result = await interact(\n ctx,\n RustLayout(\n trezorui2.confirm_with_info(\n title=title.upper(),\n items=para,\n button=confirm.upper(),\n info_button=button_text.upper(),\n )\n ),\n br_type,\n br_code,\n )\n\n if result is CONFIRMED:\n return False\n elif result is INFO:\n return True\n else:\n assert result is CANCELLED\n raise ActionCancelled\n\n\nasync def confirm_blob(\n ctx: GenericContext,\n br_type: str,\n title: str,\n data: bytes | str,\n description: str | None = None,\n hold: bool = False,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n ask_pagination: bool = False,\n) -> None:\n title = title.upper()\n description = description or \"\"\n layout = RustLayout(\n trezorui2.confirm_blob(\n title=title,\n description=description,\n data=data,\n extra=None,\n hold=hold,\n )\n )\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n layout,\n br_type,\n br_code,\n )\n )\n\n\nasync def confirm_address(\n ctx: GenericContext,\n title: str,\n address: str,\n description: str | None = \"Address:\",\n br_type: str = \"confirm_address\",\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> Awaitable[None]:\n return confirm_blob(\n ctx,\n br_type,\n title.upper(),\n address,\n description,\n br_code=br_code,\n )\n\n\nasync def confirm_text(\n ctx: GenericContext,\n br_type: str,\n title: str,\n data: str,\n description: str | None = None,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> Any:\n return await _placeholder_confirm(\n ctx,\n br_type,\n title,\n data,\n description,\n br_code=br_code,\n )\n\n\ndef confirm_amount(\n ctx: GenericContext,\n title: str,\n amount: str,\n description: str = \"Amount:\",\n br_type: str = \"confirm_amount\",\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> Awaitable[None]:\n return confirm_blob(\n ctx,\n br_type,\n title.upper(),\n amount,\n description,\n br_code=br_code,\n )\n\n\nasync def confirm_properties(\n ctx: GenericContext,\n br_type: str,\n title: str,\n props: Iterable[PropertyType],\n hold: bool = False,\n br_code: ButtonRequestType = ButtonRequestType.ConfirmOutput,\n) -> None:\n from ubinascii import hexlify\n\n def handle_bytes(prop: PropertyType):\n if isinstance(prop[1], bytes):\n return (prop[0], hexlify(prop[1]).decode(), True)\n else:\n # When there is not space in the text, taking it as data\n # to not include hyphens\n is_data = prop[1] and \" \" not in prop[1]\n return (prop[0], prop[1], is_data)\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_properties(\n title=title.upper(),\n items=map(handle_bytes, props), # type: ignore [cannot be assigned to parameter \"items\"]\n 
hold=hold,\n )\n ),\n br_type,\n br_code,\n )\n )\n\n\ndef confirm_value(\n ctx: GenericContext,\n title: str,\n value: str,\n description: str,\n br_type: str,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n *,\n verb: str | None = None,\n hold: bool = False,\n) -> Awaitable[None]:\n \"\"\"General confirmation dialog, used by many other confirm_* functions.\"\"\"\n\n if not verb and not hold:\n raise ValueError(\"Either verb or hold=True must be set\")\n\n return raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_value( # type: ignore [Argument missing for parameter \"subtitle\"]\n title=title.upper(),\n description=description,\n value=value,\n verb=verb or \"HOLD TO CONFIRM\",\n hold=hold,\n )\n ),\n br_type,\n br_code,\n )\n )\n\n\nasync def confirm_total(\n ctx: GenericContext,\n total_amount: str,\n fee_amount: str,\n fee_rate_amount: str | None = None,\n title: str = \"SENDING\",\n total_label: str = \"TOTAL AMOUNT\",\n fee_label: str = \"Including fee:\",\n account_label: str | None = None,\n br_type: str = \"confirm_total\",\n br_code: ButtonRequestType = ButtonRequestType.SignTx,\n) -> None:\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n # TODO: resolve these differences in TT's and TR's confirm_total\n trezorui2.confirm_total( # type: ignore [Arguments missing]\n total_amount=total_amount, # type: ignore [No parameter named]\n fee_amount=fee_amount, # type: ignore [No parameter named]\n fee_rate_amount=fee_rate_amount, # type: ignore [No parameter named]\n account_label=account_label, # type: ignore [No parameter named]\n total_label=total_label.upper(), # type: ignore [No parameter named]\n fee_label=fee_label, # type: ignore [No parameter named]\n )\n ),\n br_type,\n br_code,\n )\n )\n\n\nasync def confirm_joint_total(\n ctx: GenericContext, spending_amount: str, total_amount: str\n) -> None:\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_joint_total(\n spending_amount=spending_amount,\n total_amount=total_amount,\n )\n ),\n \"confirm_joint_total\",\n ButtonRequestType.SignTx,\n )\n )\n\n\nasync def confirm_metadata(\n ctx: GenericContext,\n br_type: str,\n title: str,\n content: str,\n param: str | None = None,\n br_code: ButtonRequestType = ButtonRequestType.SignTx,\n hold: bool = False,\n) -> None:\n await _placeholder_confirm(\n ctx,\n br_type,\n title.upper(),\n description=content.format(param),\n hold=hold,\n br_code=br_code,\n )\n\n\nasync def confirm_replacement(ctx: GenericContext, description: str, txid: str) -> None:\n await confirm_value(\n ctx,\n description.upper(),\n txid,\n \"Confirm transaction ID:\",\n \"confirm_replacement\",\n ButtonRequestType.SignTx,\n verb=\"CONFIRM\",\n )\n\n\nasync def confirm_modify_output(\n ctx: GenericContext,\n address: str,\n sign: int,\n amount_change: str,\n amount_new: str,\n) -> None:\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_modify_output(\n address=address,\n sign=sign,\n amount_change=amount_change,\n amount_new=amount_new,\n )\n ),\n \"modify_output\",\n ButtonRequestType.ConfirmOutput,\n )\n )\n\n\nasync def confirm_modify_fee(\n ctx: GenericContext,\n title: str,\n sign: int,\n user_fee_change: str,\n total_fee_new: str,\n fee_rate_amount: str | None = None,\n) -> None:\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_modify_fee(\n title=title,\n sign=sign,\n user_fee_change=user_fee_change,\n total_fee_new=total_fee_new,\n 
fee_rate_amount=fee_rate_amount,\n )\n ),\n \"modify_fee\",\n ButtonRequestType.SignTx,\n )\n )\n\n\nasync def confirm_coinjoin(\n ctx: GenericContext, max_rounds: int, max_fee_per_vbyte: str\n) -> None:\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_coinjoin(\n max_rounds=str(max_rounds),\n max_feerate=max_fee_per_vbyte,\n )\n ),\n \"coinjoin_final\",\n BR_TYPE_OTHER,\n )\n )\n\n\n# TODO cleanup @ redesign\nasync def confirm_sign_identity(\n ctx: GenericContext, proto: str, identity: str, challenge_visual: str | None\n) -> None:\n text = \"\"\n if challenge_visual:\n text += f\"{challenge_visual}\\n\\n\"\n text += identity\n\n await _placeholder_confirm(\n ctx,\n \"confirm_sign_identity\",\n f\"Sign {proto}\".upper(),\n text,\n br_code=BR_TYPE_OTHER,\n )\n\n\nasync def confirm_signverify(\n ctx: GenericContext, coin: str, message: str, address: str, verify: bool\n) -> None:\n if verify:\n header = f\"Verify {coin} message\"\n br_type = \"verify_message\"\n else:\n header = f\"Sign {coin} message\"\n br_type = \"sign_message\"\n\n await confirm_blob(\n ctx,\n br_type,\n header.upper(),\n address,\n \"Confirm address:\",\n br_code=BR_TYPE_OTHER,\n )\n\n await confirm_value(\n ctx,\n header.upper(),\n message,\n \"Confirm message:\",\n br_type,\n BR_TYPE_OTHER,\n verb=\"CONFIRM\",\n )\n\n\nasync def show_error_popup(\n title: str,\n description: str,\n subtitle: str | None = None,\n description_param: str = \"\",\n *,\n button: str = \"\",\n timeout_ms: int = 0,\n) -> None:\n if button:\n raise NotImplementedError(\"Button not implemented\")\n description = description.format(description_param)\n if subtitle:\n description = f\"{subtitle}\\n{description}\"\n await RustLayout(\n trezorui2.show_info(\n title=title,\n description=description,\n time_ms=timeout_ms,\n )\n )\n\n\ndef request_passphrase_on_host() -> None:\n draw_simple(\n trezorui2.show_info(\n title=\"HIDDEN WALLET\",\n description=\"Please type your passphrase on the connected host.\",\n )\n )\n\n\nasync def request_passphrase_on_device(ctx: GenericContext, max_len: int) -> str:\n await button_request(\n ctx, \"passphrase_device\", code=ButtonRequestType.PassphraseEntry\n )\n\n result = await ctx.wait(\n RustLayout(\n trezorui2.request_passphrase(\n prompt=\"ENTER PASSPHRASE\",\n max_len=max_len,\n )\n )\n )\n if result is CANCELLED:\n raise ActionCancelled(\"Passphrase entry cancelled\")\n\n assert isinstance(result, str)\n return result\n\n\nasync def request_pin_on_device(\n ctx: GenericContext,\n prompt: str,\n attempts_remaining: int | None,\n allow_cancel: bool,\n wrong_pin: bool = False,\n) -> str:\n from trezor import wire\n\n # Not showing the prompt in case user did not enter it badly yet\n # (has full 16 attempts left)\n if attempts_remaining is None or attempts_remaining == 16:\n subprompt = \"\"\n elif attempts_remaining == 1:\n subprompt = \"Last attempt\"\n else:\n subprompt = f\"{attempts_remaining} tries left\"\n\n await button_request(ctx, \"pin_device\", code=ButtonRequestType.PinEntry)\n\n dialog = RustLayout(\n trezorui2.request_pin(\n prompt=prompt,\n subprompt=subprompt,\n allow_cancel=allow_cancel,\n wrong_pin=wrong_pin,\n )\n )\n\n result = await ctx.wait(dialog)\n if result is CANCELLED:\n raise wire.PinCancelled\n assert isinstance(result, str)\n return result\n\n\nasync def confirm_reenter_pin(\n ctx: GenericContext,\n is_wipe_code: bool = False,\n) -> None:\n br_type = \"reenter_wipe_code\" if is_wipe_code else \"reenter_pin\"\n title = \"CHECK WIPE CODE\" if 
is_wipe_code else \"CHECK PIN\"\n return await confirm_action(\n ctx,\n br_type,\n title,\n action=\"Please re-enter to confirm.\",\n verb=\"BEGIN\",\n br_code=BR_TYPE_OTHER,\n )\n\n\nasync def pin_mismatch_popup(\n ctx: GenericContext,\n is_wipe_code: bool = False,\n) -> None:\n title = \"WIPE CODE MISMATCH\" if is_wipe_code else \"PIN MISMATCH\"\n description = \"wipe codes\" if is_wipe_code else \"PINs\"\n return await confirm_action(\n ctx,\n \"pin_mismatch\",\n title,\n description=f\"The {description} you entered do not match.\\nPlease try again.\",\n verb=\"TRY AGAIN\",\n verb_cancel=None,\n br_code=BR_TYPE_OTHER,\n )\n\n\nasync def wipe_code_same_as_pin_popup(\n ctx: GenericContext,\n is_wipe_code: bool = False,\n) -> None:\n return await confirm_action(\n ctx,\n \"wipe_code_same_as_pin\",\n \"INVALID WIPE CODE\",\n description=\"The wipe code must be different from your PIN.\\nPlease try again.\",\n verb=\"TRY AGAIN\",\n verb_cancel=None,\n br_code=BR_TYPE_OTHER,\n )\n\n\nasync def confirm_set_new_pin(\n ctx: GenericContext,\n br_type: str,\n title: str,\n description: str,\n information: list[str],\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> None:\n await confirm_action(\n ctx,\n br_type,\n title,\n description=description,\n verb=\"ENABLE\",\n br_code=br_code,\n )\n\n # Additional information for the user to know about PIN/WIPE CODE\n\n if \"wipe_code\" in br_type:\n verb = \"HODL TO BEGIN\" # Easter egg from @Hannsek\n else:\n information.append(\n \"Position of individual numbers will change between entries for enhanced security.\"\n )\n verb = \"HOLD TO BEGIN\"\n\n return await confirm_action(\n ctx,\n br_type,\n \"\",\n description=\"\\n\\r\".join(information),\n verb=verb,\n hold=True,\n br_code=br_code,\n )\n\n\nasync def mnemonic_word_entering(ctx: GenericContext) -> None:\n await confirm_action(\n ctx,\n \"request_word\",\n \"WORD ENTERING\",\n description=\"You'll only have to select the first 2-3 letters.\",\n verb=\"CONTINUE\",\n verb_cancel=None,\n br_code=ButtonRequestType.MnemonicInput,\n )\n","repo_name":"Migos24/firmware-master","sub_path":"firmware-master/core/src/trezor/ui/layouts/tr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":36434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
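For orientation, this is roughly how the confirmation helpers in the record above are awaited from a workflow; `ctx` comes from the calling workflow, and the button-request string and dialog texts here are illustrative assumptions only:

```python
from trezor.enums import ButtonRequestType


async def example_confirm(ctx) -> None:
    # confirm_action raises ActionCancelled unless the user confirms, so any
    # code after the await only runs once the dialog was confirmed.
    await confirm_action(
        ctx,
        "example_action",
        "EXAMPLE",
        description="Proceed with the example action?",
        hold=True,
        br_code=ButtonRequestType.Other,
    )
```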
+{"seq_id":"71211526313","text":"import argparse\nimport os\nfrom dataset import get_loader, get_loader_test\nfrom solver import Solver\n\n\ndef main(config):\n if config.mode == 'train':\n pass\n elif config.mode == 'test':\n test_loader = get_loader_test(config.test_path, config.test_label, config.img_size, config.batch_size,\n mode='test',\n filename=config.test_file, num_thread=config.num_thread)\n if not os.path.exists(config.test_fold): os.mkdir(config.test_fold)\n test = Solver(None, None, test_loader, config)\n test.test()\n\n else:\n raise IOError(\"illegal input!!!\")\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n \n # Hyper-parameters\n parser.add_argument('--n_color', type=int, default=3)\n parser.add_argument('--lr', type=float, default=3e-3)\n parser.add_argument('--clip_gradient', type=float, default=1.0)\n parser.add_argument('--cuda', type=bool, default=True)\n\n # Training settings\n parser.add_argument('--multi_gpu', type=bool, default=True)\n parser.add_argument('--vgg', type=str, default='')\n parser.add_argument('--train_path', type=str, default='')\n parser.add_argument('--label_path', type=str, default='')\n parser.add_argument('--img_size', type=int, default=None) # 256\n parser.add_argument('--epoch', type=int, default=100)\n parser.add_argument('--batch_size', type=int, default=1) # 8\n parser.add_argument('--val', type=bool, default=True)\n parser.add_argument('--val_path', type=str, default='')\n parser.add_argument('--val_label', type=str, default='')\n\n parser.add_argument('--num_thread', type=int, default=4)\n parser.add_argument('--load', type=str, default='')\n parser.add_argument('--save_fold', type=str, default='./results')\n parser.add_argument('--epoch_val', type=int, default=1)\n parser.add_argument('--epoch_save', type=int, default=1)\n parser.add_argument('--epoch_show', type=int, default=1)\n parser.add_argument('--pre_trained', type=str, default=None)\n\n # Testing settings\n parser.add_argument('--backbone', type=str, default='Res18') # Res18, Res18Fixed\n parser.add_argument('--test_path', type=str, default='')\n parser.add_argument('--test_label', type=str, default='')\n parser.add_argument('--test_file', type=str, default=None)\n parser.add_argument('--model', type=str, default='')\n parser.add_argument('--test_fold', type=str, default='')\n parser.add_argument('--use_crf', type=bool, default=False)\n\n # Misc\n parser.add_argument('--mode', type=str, default='test', choices=['train', 'test'])\n parser.add_argument('--visdom', type=bool, default=False)\n\n config = parser.parse_args()\n if config.test_file is None:\n if 'SALICON/images/test' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_SALICON')\n elif 'SALICON/images/val' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Val_SALICON')\n elif 'MIT1003/val' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Val_MIT1003')\n elif 'MIT1003/all' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'All_MIT1003')\n elif 'MIT300' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_MIT300')\n elif 'CAT2000' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_CAT2000')\n elif 'PseudoSal' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Val_PseudoSal')\n elif 'DUT-OMRON' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_DUT-OMRON')\n elif 'PASCAL-S' in config.test_path:\n 
config.test_fold = os.path.join(config.save_fold, 'Test_PASCAL-S')\n elif 'TORONTO' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_TORONTO')\n else:\n raise NotImplementedError\n else:\n if 'Eye_Fixation_Test_SALICON' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_SALICON')\n elif 'Eye_Fixation_Val_SALICON' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Val_SALICON')\n elif 'Eye_Fixation_Val_MIT1003' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Val_MIT1003')\n elif 'Eye_Fixation_All_MIT1003' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'All_MIT1003')\n elif 'Eye_Fixation_Test_MIT300' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_MIT300')\n elif 'Eye_Fixation_Test_CAT2000' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_CAT2000')\n elif 'Val_Eye_Fixation_PseudoSal_all' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Val_PseudoSal')\n elif 'Eye_Fixation_Train_DUT-OMRON' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_DUT-OMRON')\n elif 'Eye_Fixation_Train_PASCAL-S' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_PASCAL-S')\n elif 'Eye_Fixation_Train_TORONTO' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_TORONTO')\n else:\n raise NotImplementedError\n\n if not os.path.exists(config.save_fold): os.mkdir(config.save_fold)\n main(config)\n","repo_name":"gqding/SalFBNet","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"fa","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"}
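A hedged example of driving the test entry point above from Python; the dataset and checkpoint paths are placeholders, but every flag exists in the argparse setup:

```python
import subprocess

subprocess.run([
    "python", "main_test.py",
    "--mode", "test",
    "--backbone", "Res18",
    "--test_path", "data/MIT1003/val",
    "--test_label", "data/MIT1003/val_labels",
    "--model", "checkpoints/salfbnet.pth",
    "--save_fold", "results",
], check=True)
```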
+{"seq_id":"30151286640","text":"\nfrom torch.utils.data import DataLoader\nimport copy\nfrom progress.bar import Bar\nimport config\nimport os\nimport numpy as np\nimport torch\nimport cv2\nimport matplotlib.pyplot as plt\nfrom utils import torch_op\nimport util\nfrom RPModule.rpmodule import RelativePoseEstimation,getMatchingPrimitive,RelativePoseEstimation_helper\nfrom RPModule.rputil import opts\nimport argparse\nfrom model.mymodel import SCNet\nimport time\nfrom baselines import super4pcs, open3d_global_registration, open3d_fast_global_registration,open3d_color_registration\nfrom open3d import *\nimport logging\n\n\ndef getLoader(args):\n testOption='test'\n if 'suncg' in args.dataList:\n from datasets.SUNCG import SUNCG as Dataset\n dataset_name='suncg'\n val_dataset = Dataset(testOption, nViews=config.nViews,meta=False,rotate=False,rgbd=True,hmap=False,segm=True,normal=True,list_=f\"./data/dataList/{args.dataList}.npy\",singleView=0,entrySplit=args.entrySplit)\n elif 'matterport' in args.dataList:\n from datasets.Matterport3D import Matterport3D as Dataset\n dataset_name='matterport'\n val_dataset = Dataset(testOption, nViews=config.nViews,meta=False,rotate=False,rgbd=True,hmap=False,segm=True,normal=True,list_=f\"./data/dataList/{args.dataList}.npy\",singleView=0,entrySplit=args.entrySplit)\n elif 'scannet' in args.dataList:\n from datasets.ScanNet import ScanNet as Dataset\n dataset_name='scannet'\n val_dataset = Dataset(testOption, nViews=config.nViews,meta=False,rotate=False,rgbd=True,hmap=False,segm=True,normal=True,list_=f\"./data/dataList/{args.dataList}.npy\",singleView=0,fullsize_rgbdn=True,entrySplit=args.entrySplit,representation=args.representation)\n if args.debug:\n loader = DataLoader(val_dataset, batch_size=1, shuffle=False,drop_last=True,collate_fn=util.collate_fn_cat, worker_init_fn=util.worker_init_fn)\n else:\n loader = DataLoader(val_dataset, batch_size=1, shuffle=False,num_workers=1,drop_last=True,collate_fn=util.collate_fn_cat, worker_init_fn=util.worker_init_fn)\n return dataset_name,loader\n\ndef _parse_args():\n \n parser = argparse.ArgumentParser(description='Optional app description')\n parser.add_argument('--dataList', type = str, default = 'matterport3dv1', help = 'options: suncgv3,scannetv1,matterport3dv1')\n parser.add_argument('--sigmaDist',type=float, default=0.04, help = 'parameter for our pairwise matching algorithm')\n parser.add_argument('--sigmaAngle1',type=float, default=0.2615,help = 'parameter for our pairwise matching algorithm')\n parser.add_argument('--sigmaAngle2',type=float, default=0.2615, help = 'parameter for our pairwise matching algorithm')\n parser.add_argument('--sigmaFeat',type=float, default=0.01, help = 'parameter for our pairwise matching algorithm')\n parser.add_argument('--maxIter',type=int,default=1000, help = 'number of pairs to be tested')\n parser.add_argument('--outputType',type=str,default='rgbdnsf', help = 'types of output')\n parser.add_argument('--debug',action='store_true', help = 'for debug')\n parser.add_argument('--exp',type=str,default='', help = 'will create a folder with such name under experiments/')\n parser.add_argument('--snumclass',type=int,default=15, help = 'number of semantic class')\n parser.add_argument('--featureDim',type=int,default=32, help = 'feature dimension')\n parser.add_argument('--maskMethod',type=str,default='second',help='observe the second view')\n parser.add_argument('--d',type=str,default='', help = '')\n parser.add_argument('--entrySplit',type=int,default=None, help = 'use for 
parallel eval')\n    parser.add_argument('--representation',type=str,default='skybox')\n    parser.add_argument('--method',type=str,choices=['ours','ours_nc','ours_nr','super4pcs','fgs','gs','cgs'],default='ours',help='ours,super4pcs,fgs(fast global registration)')\n    parser.add_argument('--useTanh', type = int, default = 1, help = 'whether to use tanh layer on feature maps')\n    parser.add_argument('--saveCompletion', type = int, default = 1, help = 'save the completion result')\n    parser.add_argument('--batchnorm', type = int, default = 1, help = 'whether to use batch norm in completion network')\n    parser.add_argument('--skipLayer', type = int, default = 1, help = 'whether to use skip connection in completion network')\n    parser.add_argument('--num_repeat', type = int, default = 1, help = 'number of times to repeat the evaluation')\n    parser.add_argument('--rm',action='store_true',help='will remove previous evaluation results named args.exp')\n    parser.add_argument('--para', type = str, default=None,help = 'file specifying parameters for the pairwise matching module')\n    parser.add_argument(\"-l\", \"--log\", dest=\"logLevel\", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help=\"Set the logging level\")\n\n    args = parser.parse_args()\n    if args.d: os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.d\n    args.alterStep = 1 if args.method == 'ours_nr' else 3\n    args.completion = 0 if args.method == 'ours_nc' else 1\n    args.snumclass = 15 if 'suncg' in args.dataList else 21\n    if args.logLevel:\n        logging.basicConfig(level=getattr(logging, args.logLevel))\n\n    print(\"\\n parameters... *******************************\\n\")\n    print(f\"evaluate on {args.dataList}\")\n    print(f\"using method: {args.method}\")\n    print(f\"mask method: {args.maskMethod}\")\n    if 'ours' in args.method:\n        print(f\"output type: {args.outputType}\")\n        print(f\"semantic classes: {args.snumclass}\")\n        print(f\"feature dimension: {args.featureDim}\")\n        print(f\"skipLayer: {args.skipLayer}\")\n    print(\"\\n parameters... 
*******************************\\n\")\n time.sleep(5)\n\n\n args.rpm_para = opts()\n \n args.perStepPara = False\n if args.para is not None:\n para_val = np.loadtxt(args.para).reshape(-1,4)\n args.rpm_para.sigmaAngle1 = para_val[:,0]\n args.rpm_para.sigmaAngle2 = para_val[:,1]\n args.rpm_para.sigmaDist = para_val[:,2]\n args.rpm_para.sigmaFeat = para_val[:,3]\n args.perStepPara = True\n else:\n if args.sigmaAngle1: args.rpm_para.sigmaAngle1 = args.sigmaAngle1\n if args.sigmaAngle2: args.rpm_para.sigmaAngle2 = args.sigmaAngle2\n if args.sigmaDist: args.rpm_para.sigmaDist = args.sigmaDist\n if args.sigmaFeat: args.rpm_para.sigmaFeat = args.sigmaFeat\n\n return args\n\nif __name__ == '__main__':\n \n args = _parse_args()\n log = logging.getLogger(__name__)\n\n if not os.path.exists(\"tmp/rpe\"):\n os.makedirs(\"tmp/rpe\")\n exp_dir = f\"tmp/rpe/{args.exp}\"\n if not os.path.exists(exp_dir):\n os.makedirs(exp_dir)\n\n dataset_name,loader = getLoader(args)\n bar = Bar('Progress', max=len(loader))\n\n speedBenchmark=[]\n Overlaps = ['0-0.1','0.1-0.5','0.5-1.0']\n adstatsOverlaps = {it:[] for it in Overlaps}\n transstatsOverlaps = {it:[] for it in Overlaps}\n error_stats=[]\n if not args.rm:\n if os.path.exists(f\"{exp_dir}/{args.exp}.result.npy\"):\n error_stats+=np.load(f\"{exp_dir}/{args.exp}.result.npy\").tolist()\n n_run = len(error_stats)//100\n args.num_repeat -= n_run\n \n if 'ours' in args.method:\n # setup division point of outputs\n args.idx_f_start = 3+3+1+args.snumclass\n args.idx_f_end = args.idx_f_start + args.featureDim\n\n # initialize network and load checkpoint\n net=SCNet(args).cuda()\n try:\n if 'suncg' in args.dataList:\n checkpoint = torch.load('./data/pretrained_model/suncg.comp.pth.tar')\n elif 'matterport' in args.dataList:\n checkpoint = torch.load('./data/pretrained_model/matterport.comp.pth.tar')\n elif 'scannet' in args.dataList:\n checkpoint = torch.load('./data/pretrained_model/scannet.comp.pth.tar')\n except:\n raise Exception(\"please provide the pretrained model.\")\n\n state_dict = checkpoint['state_dict']\n net.load_state_dict(state_dict)\n net.cuda()\n\n for _ in range(args.num_repeat):\n\n for i, data in enumerate(loader):\n st = time.time()\n np.random.seed()\n\n # initialize data\n rgb,depth,R,Q,norm,imgPath,segm=data['rgb'],data['depth'],data['R'],data['Q'],data['norm'],data['imgsPath'],data['segm']\n # use origin size scan for baselines on scannet dataset \n if 'scannet' in args.dataList and 'ours' not in args.method:\n rgb,depth = data['rgb_full'], data['depth_full']\n R = torch_op.npy(R)\n rgb = torch_op.npy(rgb*255).clip(0,255).astype('uint8')\n norm = torch_op.npy(norm)\n depth = torch_op.npy(depth)\n segm = torch_op.npy(segm)\n \n R_src = R[0,0,:,:]\n R_tgt = R[0,1,:,:]\n R_gt_44 = np.matmul(R_tgt,np.linalg.inv(R_src))\n R_gt = R_gt_44[:3,:3]\n\n # generate source/target scans, point cloud\n depth_src,depth_tgt,normal_src,normal_tgt,color_src,color_tgt,pc_src,pc_tgt = util.parse_data(depth,rgb,norm,args.dataList,args.method)\n\n if len(pc_src) == 0 or len(pc_tgt)==0:\n print(f\"this point cloud file contain no point\")\n continue\n\n # compute overlap and other stats\n overlap_val,cam_dist_this,pc_dist_this,pc_nn = util.point_cloud_overlap(pc_src, pc_tgt, R_gt_44)\n overlap = '0-0.1' if overlap_val <= 0.1 else '0.1-0.5' if overlap_val <= 0.5 else '0.5-1.0'\n\n # do not test non-overlap with traditional method since make no sense.\n if args.method in ['fgs','gs','super4pcs','cgs'] and overlap_val < 0.1:\n continue\n\n # select which method to 
evaluate\n if args.method == 'super4pcs':\n R_hat = super4pcs(pc_src, pc_tgt)\n elif args.method == 'fgs':\n R_hat = open3d_fast_global_registration(pc_src,pc_tgt)\n elif args.method == 'gs':\n R_hat = open3d_global_registration(pc_src,pc_tgt)\n elif args.method == 'cgs':\n R_hat = open3d_color_registration(pc_src,pc_tgt, color_src,color_tgt)\n elif 'ours' in args.method:\n with torch.set_grad_enabled(False):\n\n data_s = {'rgb': rgb[0,0,:,:,:].transpose(1,2,0),\n 'depth': depth[0,0,:,:],\n 'normal':norm[0,0,:,:,:].transpose(1,2,0),\n 'R': R[0,0,:,:]}\n data_t = {'rgb': rgb[0,1,:,:,:].transpose(1,2,0),\n 'depth': depth[0,1,:,:],\n 'normal':norm[0,1,:,:,:].transpose(1,2,0),\n 'R': R[0,1,:,:]}\n\n R_hat = np.eye(4)\n\n # get the complete scans\n complete_s=torch.cat((torch_op.v(data['rgb'][:,0,:,:,:]),torch_op.v(data['norm'][:,0,:,:,:]),torch_op.v(data['depth'][:,0:1,:,:])),1)\n complete_t=torch.cat((torch_op.v(data['rgb'][:,1,:,:,:]),torch_op.v(data['norm'][:,1,:,:,:]),torch_op.v(data['depth'][:,1:2,:,:])),1)\n\n # apply the observation mask\n view_s,mask_s,_ = util.apply_mask(complete_s.clone(),args.maskMethod)\n view_t,mask_t,_ = util.apply_mask(complete_t.clone(),args.maskMethod)\n mask_s=torch_op.npy(mask_s[0,:,:,:]).transpose(1,2,0)\n mask_t=torch_op.npy(mask_t[0,:,:,:]).transpose(1,2,0)\n\n # append mask for valid data\n tpmask = (view_s[:,6:7,:,:]!=0).float().cuda()\n view_s=torch.cat((view_s,tpmask),1)\n tpmask = (view_t[:,6:7,:,:]!=0).float().cuda()\n view_t=torch.cat((view_t,tpmask),1)\n\n for alter_ in range(args.alterStep):\n \n # warp the second scan using current transformation estimation\n view_t2s=torch_op.v(util.warping(torch_op.npy(view_t),np.linalg.inv(R_hat),args.dataList))\n view_s2t=torch_op.v(util.warping(torch_op.npy(view_s),R_hat,args.dataList))\n # append the warped scans\n view0 = torch.cat((view_s,view_t2s),1)\n view1 = torch.cat((view_t,view_s2t),1)\n\n # generate complete scans\n f=net(torch.cat((view0,view1)))\n f0=f[0:1,:,:,:]\n f1=f[1:2,:,:,:]\n \n data_sc,data_tc={},{}\n # replace the observed region with observed depth/normal\n data_sc['normal'] = (1-mask_s)*torch_op.npy(f0[0,3:6,:,:]).transpose(1,2,0)+mask_s*data_s['normal']\n data_tc['normal'] = (1-mask_t)*torch_op.npy(f1[0,3:6,:,:]).transpose(1,2,0)+mask_t*data_t['normal']\n data_sc['normal']/= (np.linalg.norm(data_s['normal'],axis=2,keepdims=True)+1e-6)\n data_tc['normal']/= (np.linalg.norm(data_t['normal'],axis=2,keepdims=True)+1e-6)\n data_sc['depth'] = (1-mask_s[:,:,0])*torch_op.npy(f0[0,6,:,:])+mask_s[:,:,0]*data_s['depth']\n data_tc['depth'] = (1-mask_t[:,:,0])*torch_op.npy(f1[0,6,:,:])+mask_t[:,:,0]*data_t['depth']\n data_sc['obs_mask'] = mask_s.copy()\n data_tc['obs_mask'] = mask_t.copy()\n data_sc['rgb'] = (mask_s*data_s['rgb']).astype('uint8')\n data_tc['rgb'] = (mask_t*data_t['rgb']).astype('uint8')\n \n # for scannet, we use the original size rgb image(480x640) to extract sift keypoint\n if 'scannet' in args.dataList:\n data_sc['rgb_full'] = (torch_op.npy(data['rgb_full'][0,0,:,:,:])*255).astype('uint8')\n data_tc['rgb_full'] = (torch_op.npy(data['rgb_full'][0,1,:,:,:])*255).astype('uint8')\n data_sc['depth_full'] = torch_op.npy(data['depth_full'][0,0,:,:])\n data_tc['depth_full'] = torch_op.npy(data['depth_full'][0,1,:,:])\n \n # extract feature maps\n f0_feat=f0[:,args.idx_f_start:args.idx_f_end,:,:]\n f1_feat=f1[:,args.idx_f_start:args.idx_f_end,:,:]\n data_sc['feat']=f0_feat.squeeze(0)\n data_tc['feat']=f1_feat.squeeze(0)\n\n # run relative pose module to get next estimate\n if 
args.perStepPara:\n                        para_this = opts(args.rpm_para.sigmaAngle1[alter_],args.rpm_para.sigmaAngle2[alter_],args.rpm_para.sigmaDist[alter_],args.rpm_para.sigmaFeat[alter_])\n                    else:\n                        para_this = args.rpm_para\n\n                    pts3d,ptt3d,ptsns,ptsnt,dess,dest,ptsW,pttW = getMatchingPrimitive(data_sc,data_tc,dataset_name,args.representation,args.completion)\n                    # early return if too few keypoints detected\n                    if pts3d is None or ptt3d is None or pts3d.shape[0]<2 or ptt3d.shape[0]<2:\n                        logging.info(f\"no pts detected or fewer than 2 keypoints detected, return identity: {np.eye(3)}\")\n                        R_hat = np.eye(4)\n                    else:\n                        R_hat = RelativePoseEstimation_helper({'pc':pts3d.T,'normal':ptsns,'feat':dess,'weight':ptsW},{'pc':ptt3d.T,'normal':ptsnt,'feat':dest,'weight':pttW},para_this)\n\n            # average speed\n            time_this = time.time()-st\n            speedBenchmark.append(time_this)\n            \n            # compute rotation error and translation error\n            t_hat = R_hat[:3,3]\n            R_hat = R_hat[:3,:3]\n            \n            ad_this = util.angular_distance_np(R_hat, R_gt[np.newaxis,:,:])[0]\n            ad_blind_this = util.angular_distance_np(R_gt[np.newaxis,:,:],np.eye(3)[np.newaxis,:,:])[0]\n            translation_this = np.linalg.norm(np.matmul((R_hat - R_gt_44[:3,:3]),pc_src.mean(0).reshape(3)) + t_hat - R_gt_44[:3,3])\n            translation_blind_this = np.linalg.norm(t_hat - R_gt_44[:3,3])\n\n            # save result for this pair\n            R_pred_44=np.eye(4)\n            R_pred_44[:3,:3]=R_hat\n            R_pred_44[:3,3]=t_hat\n            error_stats.append({'img_src':imgPath[0][0],'img_tgt':imgPath[1][0], 'err_ad':ad_this,\n                'err_t':translation_this,'err_blind':ad_blind_this,'err_t_blind':translation_blind_this,'overlap':overlap_val,'pc_dist':pc_dist_this,\n                'cam_dist':cam_dist_this,'pc_nearest':pc_nn,'R_gt':R_gt_44,'R_pred_44':R_pred_44})\n            \n            # update statistics\n            adstatsOverlaps[overlap].append(ad_this)\n            transstatsOverlaps[overlap].append(translation_this)\n\n            # print log\n            log.info(f\"average processing time per pair: {np.sum(speedBenchmark)/len(speedBenchmark)}\")\n            log.info(f\"imgPath:{imgPath},R_hat:{R_hat}\")\n            log.info(f\"ad/ad_blind this :{ad_this}/{ad_blind_this}\\\n\")\n\n            # print progress bar\n            Bar.suffix = '{dataset:10}: [{0:3}/{1:3}] | Total: {total:} | ETA: {eta:}'.format(i, len(loader), total=bar.elapsed_td, eta=bar.eta_td,dataset=dataset_name)\n            bar.next()\n            if (i+1) % 100 == 0:\n                np.save(f\"{exp_dir}/{args.exp}.result.npy\",error_stats)\n                sss=''\n                for overlap in Overlaps:\n                    sss += f\"rotation, overlap:{overlap},nobs:{len(adstatsOverlaps[overlap])}, mean:{np.mean(adstatsOverlaps[overlap])} \"\n                print(sss)\n                sss=''\n                for overlap in Overlaps:\n                    sss += f\"translation, overlap:{overlap},nobs:{len(transstatsOverlaps[overlap])}, mean:{np.mean(transstatsOverlaps[overlap])} \"\n                print(sss)\n\n            if i == args.maxIter:\n                break\n\n    np.save(f\"{exp_dir}/{args.exp}.result.npy\",error_stats)\n","repo_name":"zhenpeiyang/RelativePose","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":18205,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"72"}
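The rotation error above comes from util.angular_distance_np, which is not shown in this record; the standard computation behind it is the geodesic distance on SO(3). A minimal sketch under that assumption (an illustrative helper, not the repo's exact implementation):

import numpy as np

def angular_distance_deg(R1, R2):
    # geodesic angle between two rotations: arccos((tr(R1 @ R2.T) - 1) / 2)
    cos_theta = (np.trace(R1 @ R2.T) - 1.0) / 2.0
    cos_theta = np.clip(cos_theta, -1.0, 1.0)  # guard against numerical drift
    return np.degrees(np.arccos(cos_theta))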
+{"seq_id":"32534787950","text":"\"\"\" \nEsta version esta pensado en 2 programas principales: reorientacion y motorNorte.\n\n*reorientacion no tiene argumento. Solo enciende el magnetometro, toma un promedio sobre cada eje y calcula el angulo actual de la antena. Devuelve este ultimo redondeado.\n \n*motornorte(arg1,arg2):\nmotor recibe en arg1 el angulo actual del satelite a analizar y en arg2 el angulo\nazimutal de la antena.\nEste funciona con \"AutomatizacionV2.ino\" montado sobre dicha plaqueta.\nNo tiene valor de retorno.\n\nEsta funcion deberia correrse en un loop mientras el antena.py este corriendo.\nProbablemente no sean necesarias las lineas de coneccion a serial.\n\nLas instrucciones son enviada por serial como una cadena del estilo \"1,0\" y \"i,j\" con i = 2,3 y j un entero.\n\n\"\"\"\n\nimport numpy as np\nimport serial, time\nimport re\n\n#Correcion sobre grados calculados\ndef trunc(entrada):\n flot = entrada - int(entrada)\n if flot < 0.5:\n salida = int(entrada)\n elif flot >= 0.5:\n salida = int(entrada) + 1\n return salida\n\n\ndef reorientacion(arduino):\n #Inicia conexion con la plaqueta mediante USB:\n arduino.write(bytes('1,0', 'utf-8')) \n data = \"\"\n \n print(\"Escribiendo datos de la magnetización del IMU...\")\n for i in range(10):\n print(i)\n linea = arduino.readline().decode('utf-8')\n data = data +linea\n print(\"terminado\")\n\n\n data2=data.replace(';', '\\n').split('\\n')\n x = []\n y = []\n z = []\n\n rango = np.arange(0, len(data2), 3) \n rango = rango[0:-1]\n for j in rango:\n x =np.append(x, [float(data2[j])])\n y= np.append(y, [float(data2[j+1])])\n z= np.append(z, [float(data2[j+2])])\n\n print(\"Resultados (microTesla):\")\n\n mean_x =np.mean(x)\n mean_y =np.mean(y)\n print(mean_x,mean_y)\n print(\"Midiendo posicion relativa al norte\")\n\n import math\n fi_rad = math.atan(mean_x/mean_y)\n fi_grad_1 = fi_rad*180/math.pi \n if mean_x < 0:\n if mean_y> 0:\n fi_grad = fi_grad_1\n if mean_y < 0:\n fi_grad = fi_grad_1-180\n if mean_x > 0:\n if mean_y < 0:\n fi_grad = fi_grad_1+180\n if mean_y > 0:\n fi_grad = fi_grad_1\n\n print(\"El norte esta a \", str(round(fi_grad)), \"° respecto del eje x del IMU\")\n\n return trunc(fi_grad)\n\n\n\ndef motorNorte(angSat, angAct, arduino):\n\n unidad = 512/360\n deltaGrado = angSat - angAct \n grado = trunc(deltaGrado * unidad)\n\n if deltaGrado < 0: #Si el satelite se movio positivamente\n orden = '2,'+str(grado)\n print(orden) \n arduino.write(bytes(orden, 'utf-8'))\n\n elif deltaGrado > 0: #Si el satelite se movio negativamente\n orden = '3,'+str(grado) \n print(orden)\n arduino.write(bytes(orden, 'utf-8'))\n\n\n time.sleep(3)\n print('Reorientando...')\n\n#Estos son lineas de prueba, se pueden descomentar y correr simplemente este programa sin ningun otro\n\n#azimut = reorientacion()\n#print('azimut actual: ', azimut)\n#motorNorte(0, azimut)\n#motorNorte(50,60)\n\n","repo_name":"arellana/TeledeteccionIAFE","sub_path":"VersionFinal/Mediciones/programa2_v3.py","file_name":"programa2_v3.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"12222587275","text":"from trading_engine import *\nfrom data_processing import DataProcessing\ndp = DataProcessing([])\n\n\nfilename = r'data for trading\\streaming live prices.csv'\ninit_flags_and_order_ids(filename)\n\nwhile True:\n crossover_strategy(filename)","repo_name":"kesler20/trading_bot","sub_path":"trading_bot_test.py","file_name":"trading_bot_test.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17417160106","text":"archivo=open(\"Factorial7.txt\",\"a\")\r\nimport psycopg2\r\n\r\ndef ingreso():\r\n try:\r\n entrada = int(input(\"Ingrese el numero que desea calcular:\"));\r\n \r\n except:\r\n print(\"Error, ingrese valores numericos\\n\")\r\n entrada=ingreso()\r\n return entrada\r\n\r\ndef verificar():\r\n entrada=ingreso()\r\n cociente, residuo = divmod(entrada, 7)\r\n\r\n if(residuo==0 and cociente>0):\r\n factorial = 1\r\n valor=entrada\r\n for i in range(entrada):\r\n factorial=factorial*entrada\r\n entrada=entrada-1\r\n \r\n\r\n conexion1 = psycopg2.connect(database=\"Prueba\", user=\"postgres\", password=\"usac21\")\r\n cursor1=conexion1.cursor()\r\n sql=\"insert into factorial7(valor, factorial) values (%s,%s)\"\r\n datos=(valor, factorial)\r\n cursor1.execute(sql, datos)\r\n conexion1.commit()\r\n conexion1.close() \r\n print (\"El fatorial es\",factorial,\"****Registro Almacenado****\\n\")\r\n \r\n\r\n \r\n elif(reciduo!=0 or cociente==0):\r\n print(\"El numero ingresado no es mutiplo de 7\\n\")\r\n\r\n \r\n\r\nverificar()\r\narchivo.close()\r\n","repo_name":"HeinzVelasquez/PAIE","sub_path":"Factorial mutiplo de 7.py","file_name":"Factorial mutiplo de 7.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"43999573325","text":"\r\nfrom collections import defaultdict\r\nimport networkx as nx\r\nwith open('1-kmers.txt') as f:\r\n lines = f.read().splitlines()\r\n edges = list()\r\n for line in lines:\r\n edges.append( (line[0:(len(line)-1)],line[1:len(line)]))\r\n\r\nprint(edges)\r\n\r\ngraph = nx.Graph()\r\ngraph.add_edges_from(edges)\r\nprint(graph.edges())\r\nprint(graph.nodes())\r\n\r\ndegrees = defaultdict(int)\r\nfor k in graph:\r\n print(k)\r\n for v in graph[k]:\r\n print(v)\r\n degrees[k] += 1\r\n degrees[v] -= 1\r\nsource = [k for k, v in degrees.items() if v == 1]\r\nsinc = [k for k, v in degrees.items() if v == -1]\r\n#print 'source: %s, sinc: %s' % (source, sinc)\r\n\r\nif sinc in graph.nodes():\r\n graph.add_edge(sink,source)\r\nelse:\r\n graph.add_node(sink)\r\n graph.add_edge(sink,source)\r\n\r\ncycles = {}\r\nwhile graph:\r\n current = next(iter(graph))\r\n cycle = [current]\r\n cycles[current] = cycle\r\n while current in graph:\r\n next_ = graph[current][0]\r\n del graph[current][0]\r\n if len(graph[current]) == 0:\r\n del graph[current]\r\n current = next_\r\n cycle.append(next_)\r\n\r\n\r\ndef traverse(tree, root):\r\n out = []\r\n for r in tree[root]:\r\n if r != root and r in tree:\r\n out += traverse(tree, r)\r\n else:\r\n out.append(r)\r\n return out\r\n\r\ncycle = traverse(cycles, 0)\r\nfor i in range(1, len(cycle)):\r\n if cycle[i-1] == sinc and cycle[i] == source:\r\n boarder = i\r\npath = cycle[boarder:]+cycle[1:boarder]\r\nprint ('->'.join([str(i) for i in path]))","repo_name":"SparshAgarwal/BioInformatics","sub_path":"HW1/PA2.py","file_name":"PA2.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"26679709830","text":"# Submission Link : https://leetcode.com/submissions/detail/230781316/\n\nclass Solution(object):\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n \n s = sorted(s)\n t = sorted(t)\n if s == t:\n return True\n else:\n return False\n \n","repo_name":"prateekiiest/Competitive-Programming-Algo-DS","sub_path":"LeetCode/valid_anagram.py","file_name":"valid_anagram.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
+{"seq_id":"72582018474","text":"import math\nimport numpy\n\ndef lines():\n with open(\"5.txt\") as fp:\n return fp.readlines()\n\ndef process_lines():\n post = []\n for a in lines():\n post.append(a)\n return post\n\n# First 7 makes a binary number\n# Last 3 makes a binary number\n# first 7 times 8 is just move 3 digits to the left\n# So the whole thing is just a binary number\n\nlines = process_lines()\n\n# lines = [\"BFFFBBFRRR\", \"FFFBBBFRRR\", \"BBFFBBFRLL\", \"FBFBBFFRLR\"]\nres = []\n\n\nfor l in lines:\n r = l.replace('F', '0')\n r = r.replace('B', '1')\n r = r.replace('L', '0')\n r = r.replace('R', '1')\n\n res.append(int(r, 2))\nprint(max(res))\n \n","repo_name":"Barisimre/AoC2020","sub_path":"5_1.py","file_name":"5_1.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"21785892179","text":"# Author: Bohua Zhan\n\nimport unittest\n\nfrom kernel.type import TVar, Type, TFun, boolT\nfrom kernel.term import Var, Const, Comb, Abs, Bound, Term\nfrom kernel.thm import Thm\nfrom logic import logic\nfrom logic import nat\nfrom logic import list\nfrom logic import set\nfrom logic import basic\nfrom logic import function\nfrom syntax import printer\n\nthy = basic.load_theory('list')\n\nA = Var(\"A\", boolT)\nB = Var(\"B\", boolT)\nC = Var(\"C\", boolT)\nTa = TVar(\"a\")\na = Var(\"a\", Ta)\nb = Var(\"b\", Ta)\nP = Var(\"P\", TFun(Ta, boolT))\nQ = Var(\"Q\", TFun(Ta, boolT))\nR = Var(\"R\", TFun(Ta, Ta, boolT))\nf = Var(\"f\", TFun(Ta, Ta))\nnn = Var(\"n\", TFun(boolT, boolT))\nm = Var(\"m\", nat.natT)\nn = Var(\"n\", nat.natT)\np = Var(\"p\", nat.natT)\nxs = Var(\"xs\", Type(\"list\", Ta))\nys = Var(\"ys\", Type(\"list\", Ta))\nzs = Var(\"zs\", Type(\"list\", Ta))\neq = Term.mk_equals\nimp = Term.mk_implies\nconj = logic.mk_conj\ndisj = logic.mk_disj\nabs = Term.mk_abs\nall = Term.mk_all\nneg = logic.neg\nexists = logic.mk_exists\nmk_if = logic.mk_if\n\nclass PrinterTest(unittest.TestCase):\n def testPrintLogical(self):\n test_data = [\n # Equality and implies\n (eq(a, b), \"a = b\"),\n (imp(A, B), \"A --> B\"),\n (imp(A, B, C), \"A --> B --> C\"),\n (imp(imp(A, B), C), \"(A --> B) --> C\"),\n (imp(A, eq(a, b)), \"A --> a = b\"),\n (eq(imp(A, B), imp(B, C)), \"(A --> B) = (B --> C)\"),\n (eq(A, eq(B, C)), \"A = (B = C)\"),\n (eq(eq(A, B), C), \"A = B = C\"),\n\n # Conjunction and disjunction\n (conj(A, B), \"A & B\"),\n (disj(A, B), \"A | B\"),\n (conj(A, conj(B, C)), \"A & B & C\"),\n (conj(conj(A, B), C), \"(A & B) & C\"),\n (disj(A, disj(B, C)), \"A | B | C\"),\n (disj(disj(A, B), C), \"(A | B) | C\"),\n (disj(conj(A, B), C), \"A & B | C\"),\n (conj(disj(A, B), C), \"(A | B) & C\"),\n (disj(A, conj(B, C)), \"A | B & C\"),\n (conj(A, disj(B, C)), \"A & (B | C)\"),\n (disj(conj(A, B), conj(B, C)), \"A & B | B & C\"),\n (conj(disj(A, B), disj(B, C)), \"(A | B) & (B | C)\"),\n\n # Negation\n (neg(A), \"~A\"),\n (neg(neg(A)), \"~~A\"),\n\n # Constants\n (logic.true, \"true\"),\n (logic.false, \"false\"),\n\n # Mixed\n (imp(conj(A, B), C), \"A & B --> C\"),\n (imp(A, disj(B, C)), \"A --> B | C\"),\n (conj(A, imp(B, C)), \"A & (B --> C)\"),\n (disj(imp(A, B), C), \"(A --> B) | C\"),\n (neg(conj(A, B)), \"~(A & B)\"),\n (neg(imp(A, B)), \"~(A --> B)\"),\n (neg(eq(A, B)), \"~A = B\"),\n (eq(neg(A), B), \"(~A) = B\"),\n (eq(neg(A), neg(B)), \"(~A) = (~B)\"),\n\n # Abstraction\n (abs(a, conj(P(a),Q(a))), \"%a. P a & Q a\"),\n\n # Quantifiers\n (all(a, P(a)), \"!a. P a\"),\n (all(a, all(b, conj(P(a),P(b)))), \"!a. !b. P a & P b\"),\n (all(a, conj(P(a), Q(a))), \"!a. P a & Q a\"),\n (conj(all(a, P(a)), Q(a)), \"(!a. P a) & Q a\"),\n (all(a, imp(P(a), Q(a))), \"!a. P a --> Q a\"),\n (imp(all(a, P(a)), Q(a)), \"(!a. P a) --> Q a\"),\n (imp(all(a, P(a)), all(a, Q(a))), \"(!a. P a) --> (!a. Q a)\"),\n (imp(exists(a, P(a)), exists(a, Q(a))), \"(?a. P a) --> (?a. Q a)\"),\n (eq(A, all(a, P(a))), \"A = (!a. P a)\"),\n (exists(a, P(a)), \"?a. P a\"),\n (exists(a, all(b, R(a, b))), \"?a. !b. R a b\"),\n (all(a, exists(b, R(a, b))), \"!a. ?b. 
R a b\"),\n\n # If\n (mk_if(A, a, b), \"if A then a else b\"),\n (eq(mk_if(A, a, b), a), \"(if A then a else b) = a\"),\n (mk_if(A, P, Q), \"if A then P else Q\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintFunction(self):\n test_data = [\n (P(a), \"P a\"),\n (P(f(a)), \"P (f a)\"),\n (R(a,a), \"R a a\"),\n (nn(conj(A,B)), \"n (A & B)\"),\n (conj(nn(A), B), \"n A & B\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintArithmetic(self):\n test_data = [\n (nat.plus(m, n), \"m + n\"),\n (nat.plus(nat.plus(m, n), p), \"m + n + p\"),\n (nat.plus(m, nat.plus(n, p)), \"m + (n + p)\"),\n (nat.times(m, n), \"m * n\"),\n (nat.times(nat.times(m, n), p), \"m * n * p\"),\n (nat.times(m, nat.times(n, p)), \"m * (n * p)\"),\n (nat.plus(m, nat.times(n, p)), \"m + n * p\"),\n (nat.times(m, nat.plus(n, p)), \"m * (n + p)\"),\n (nat.zero, \"0\"),\n (nat.plus(nat.zero, nat.zero), \"0 + 0\"),\n (nat.times(m, nat.zero), \"m * 0\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testBinary(self):\n test_data = [\n (nat.one, \"1\"),\n (nat.bit0(nat.one), \"2\"),\n (nat.bit1(nat.one), \"3\"),\n (nat.Suc(nat.one), \"Suc 1\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintList(self):\n nil = list.nil\n cons = list.mk_cons\n append = list.mk_append\n test_data = [\n (append(xs, ys), \"xs @ ys\"),\n (append(append(xs, ys), zs), \"(xs @ ys) @ zs\"),\n (append(xs, append(ys, zs)), \"xs @ ys @ zs\"),\n (cons(a, nil(Ta)), \"[a]\"),\n (cons(a, cons(b, nil(Ta))), \"[a, b]\"),\n (cons(a, xs), \"a # xs\"),\n (append(cons(a, nil(Ta)), cons(b, nil(Ta))), \"[a] @ [b]\"),\n (cons(a, append(xs, ys)), \"a # xs @ ys\"),\n (append(cons(a, xs), ys), \"(a # xs) @ ys\"),\n (list.cons(Ta)(a), \"cons a\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintSet(self):\n A = Var(\"A\", set.setT(Ta))\n B = Var(\"B\", set.setT(Ta))\n x = Var(\"x\", Ta)\n test_data = [\n (set.empty_set(Ta), \"({}::'a set)\", \"(∅::'a set)\"),\n (set.mk_mem(x, A), \"x MEM A\", \"x ∈ A\"),\n (set.mk_subset(A, B), \"A SUB B\", \"A ⊆ B\"),\n (set.mk_inter(A, B), \"A INTER B\", \"A ∩ B\"),\n (set.mk_union(A, B), \"A UNION B\", \"A ∪ B\"),\n ]\n\n for t, s1, s2 in test_data:\n self.assertEqual(printer.print_term(thy, t), s1)\n self.assertEqual(printer.print_term(thy, t, unicode=True), s2)\n\n def testPrintFunction(self):\n test_data = [\n (function.mk_fun_upd(f, a, b), \"(f)(a := b)\"),\n (function.mk_fun_upd(f, a, b, b, a), \"(f)(a := b, b := a)\"),\n ]\n\n thy = basic.load_theory('function')\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintWithType(self):\n test_data = [\n (list.nil(Ta), \"([]::'a list)\"),\n (eq(list.nil(Ta), list.nil(Ta)), \"([]::'a list) = []\"),\n (all(a, eq(a, a)), \"!a::'a. a = a\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintUnicode(self):\n test_data = [\n (conj(A, B), \"A ∧ B\"),\n (disj(A, B), \"A ∨ B\"),\n (imp(A, B), \"A ⟶ B\"),\n (abs(a, P(a)), \"λa. P a\"),\n (all(a, P(a)), \"∀a. P a\"),\n (exists(a, P(a)), \"∃a. 
P a\"),\n (neg(A), \"¬A\"),\n (nat.plus(m, n), \"m + n\"),\n (nat.times(m, n), \"m * n\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t, unicode=True), s)\n\n def testPrintHighlight(self):\n \"\"\"Test highlight\"\"\"\n # 0, 1, 2, 3 = NORMAL, BOUND, VAR, TVAR\n test_data = [\n (abs(a,P(a)), [('%',0),('a',1),('. ',0),('P ',2),('a',1)]),\n (all(a,P(a)), [('!',0),('a',1),('. ',0),('P ',2),(\"a\",1)]),\n (all(a,all(b,conj(P(a),P(b)))), [('!',0),('a',1),('. !',0),('b',1),('. ',0),('P ',2),('a',1),(' & ',0),('P ',2),('b',1)]),\n (exists(a,all(b,R(a,b))), [('?',0),(\"a\",1),('. !',0),('b',1),('. ',0),('R ',2),('a b',1)]),\n (exists(a,P(a)), [('?',0),('a',1),('. ',0),('P ',2),('a',1)]),\n (disj(disj(A,B),C), [('(',0),('A',2),(' | ',0),('B',2),(') | ',0),('C',2)]),\n (imp(imp(A,B),C), [('(',0),('A',2),(' --> ',0),('B',2),(') --> ',0),('C',2)]),\n (abs(a,a), [('%',0),('a',1),('::',0),(\"'a\",3),('. ',0),('a',1)]),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t, highlight=True), s)\n\n def testPrintThmHighlight(self):\n \"\"\"Test printing of theorems with highlight.\"\"\"\n # 0, 1, 2, 3 = NORMAL, BOUND, VAR, TVAR\n A = Var('A', boolT)\n B = Var('B', boolT)\n A_to_B = Term.mk_implies(A, B)\n th = Thm([A, A_to_B], B)\n res = printer.print_thm(thy, th, highlight=True)\n self.assertEqual(res, [('A',2),(', ',0),('A',2),(' --> ',0),('B',2),(' ',0),('|-',0),(' ',0),('B',2)])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"zhouwenfan/temp","sub_path":"syntax/tests/printer_test.py","file_name":"printer_test.py","file_ext":"py","file_size_in_byte":9176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20540804158","text":"from setuptools import setup, find_packages\nimport sys, os\n\nversion = '0.1'\n\nsetup(name='gatekeeper',\n version=version,\n description=\"\",\n long_description=\"\"\" \"\"\",\n classifiers=[],\n keywords='',\n author='',\n author_email='',\n url='',\n license='',\n packages=find_packages('src'),\n package_dir = {'': 'src'},\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"cromlech.browser\",\n \"cromlech.webob\",\n \"dolmen.tales\",\n \"dolmen.template\",\n \"dolmen.forms.base\",\n \"dolmen.view\",\n \"dolmen.viewlet\",\n \"dolmen.message\",\n \"webob\",\n \"zope.i18nmessageid\",\n ],\n)\n","repo_name":"novareto/gatekeeper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30460448248","text":"# Importing the random module\nimport random\n # Defining the get_user_choice function\ndef get_user_choice():\n # while loop to repeatedly prompt the user until they enter a valid choice ('rock', 'paper', or 'scissors'\n while True:\n choice = input(\"Enter your choice (rock, paper, scissors): \")\n if choice in [\"rock\", \"paper\", \"scissors\"]:\n return choice\n else:\n print(\"Invalid choice. Please try again.\")\n# Defining the get_computer_choice function\ndef get_computer_choice():\n choices = [\"rock\", \"paper\", \"scissors\"]\n return random.choice(choices)\n\n# Defining the determine_winner function\ndef determine_winner(user_choice, computer_choice):\n if user_choice == computer_choice:\n return \"It's a tie!\"\n elif (\n (user_choice == \"rock\" and computer_choice == \"scissors\") or\n (user_choice == \"paper\" and computer_choice == \"rock\") or\n (user_choice == \"scissors\" and computer_choice == \"paper\")\n ):\n return \"You win!\"\n else:\n return \"Computer wins!\"\n\n# Start the game with the welcome message\nprint(\"Welcome to Rock, Paper, Scissors Game!\")\n\n# Prompting the user to enter their choice\nuser_choice = get_user_choice()\n\n# Generating the computer's choice and print it\ncomputer_choice = get_computer_choice()\nprint(\"Computer chooses:\", computer_choice)\n\n# Determiinge the winner and printing the result\nresult = determine_winner(user_choice, computer_choice)\nprint(result)\n","repo_name":"topisteronyango/plp-python-code-challenge","sub_path":"challenge3-rock-paper-scissors/rcokpaperscissors.py","file_name":"rcokpaperscissors.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17370711783","text":"import numpy as np\nfrom sys import argv\nimport cs273b\n\ndata_dir = '/datadrive/project_data/'\n\ninsFreqs = []\ndelFreqs = []\nreference, ambiguous_bases = cs273b.load_bitpacked_reference(data_dir + \"Homo_sapiens_assembly19.fasta.bp\")\nfor i in range(1, 24):\n if i == 23:\n ch = 'X'\n else:\n ch = str(i)\n print('Processing ' + ch)\n referenceChr = reference[ch]\n c_len = len(referenceChr)\n\n insertionLocations = np.loadtxt(data_dir + \"indelLocations{}_ins.txt\".format(ch)).astype(int)\n deletionLocations = np.loadtxt(data_dir + \"indelLocations{}_del.txt\".format(ch)).astype(int)\n #indelLocations = np.concatenate((insertionLocations, deletionLocations)) - 1\n\n insFreq = float(len(insertionLocations)) / c_len\n delFreq = float(len(deletionLocations)) / c_len\n insFreqs.append(insFreq)\n delFreqs.append(delFreq)\n continue\n\n bucketsize = 1000000\n num_buckets = (c_len + bucketsize - 1) // bucketsize\n num_indels = [0]*num_buckets\n bucketsizes = [bucketsize]*num_buckets\n bucketsizes[-1] = c_len % bucketsize\n\n for il in indelLocations:\n num_indels[il / bucketsize] += 1\n\n freqs = [float(x)/y for x, y in zip(num_indels, bucketsizes)]\n\n print(np.array(freqs))\n\n #print(num_indels)\n #print(bucketsizes)\n\nprint(insFreqs)\nprint(delFreqs)\n","repo_name":"gakgsr/CS273b-Extension","sub_path":"all_histograms.py","file_name":"all_histograms.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"8491637981","text":"#!/usr/bin/env python3\nimport warnings\nimport numpy as np\nfrom math import log10, sqrt\nfrom tensorflow.image import ssim\n\n# Check size and types\ndef assert_sizeImages(original, modified):\n original = np.asarray(original)\n modified = np.asarray(modified)\n\n assert original.shape == modified.shape, \"Supplied images have different sizes \" + \\\n\tstr(original.shape) + \" and \" + str(modified.shape)\n \n if original.dtype != modified.dtype:\n msg = \"Supplied images have different dtypes \" + \\\n str(original.dtype) + \" and \" + str(modified.dtype)\n warnings.warn(msg)\n\n if len(original.shape) == 2:\n original = original[:,:,np.newaxis]\n modified = modified[:,:,np.newaxis]\n\n return original.astype(np.float64), modified.astype(np.float64)\n\n\n# Mean Square Error\ndef metric_MSE(original, modified):\n original, modified = assert_sizeImages(original,modified)\n return np.mean((original.astype(np.float64) - modified.astype(np.float64)) ** 2)\n\n\n# Root Mean Square Error\ndef metric_RMSE(original, modified):\n return sqrt(metric_MSE(original, modified))\n\n\n# Peak Signal to Noise Ratio\ndef metric_PSNR(original, modified):\n mse = metric_RMSE(original, modified)\n if mse == 0.:\n return np.inf\n return 20 * log10(255.0 / sqrt(mse))\n\n\n# Structural Similarity Index Measure\ndef metric_SSIM(original, modified, L=255):\n original, modified = assert_sizeImages(original,modified)\n return ssim(original, modified, L).numpy()\n\n\n# Spectral Angle Mapper\ndef metric_SAM(original, modified):\n original, modified = assert_sizeImages(original,modified)\n\n original = original.reshape((original.shape[0]*original.shape[1],original.shape[2]))\n modified = modified.reshape((modified.shape[0]*modified.shape[1],modified.shape[2]))\n\n N = original.shape[1]\n sam_angles = np.zeros(N)\n for i in range(original.shape[1]):\n val = np.clip(np.dot(original[:,i],modified[:,i]) / (np.linalg.norm(original[:,i])*np.linalg.norm(modified[:,i])),-1,1)\t\t\n sam_angles[i] = np.arccos(val)\n\n return np.mean(sam_angles)\n\n\n'''\nThis is a re-implmentation of the Python code that implements the HaarPSI metric introduced in\nthe following paper:\nR. Reisenhofer, S. Bosse, G. Kutyniok and T. Wiegand.\nA Haar Wavelet-Based Perceptual Similarity Index for Image Quality Assessment. (PDF)\nSignal Processing: Image Communication, vol. 61, 33-43, 2018.\nThe original Python implmentation can be found here:\nhttp://www.haarpsi.org/\nor here:\nhttps://github.com/rgcda/haarpsi\nThe original Python code computes haar gradients that are iaccurate and inefficient. This has\nbeen fixed in this code. As a result, this version is more accurate, and about 3 times faster.\nThis version of the code is also simpler to understand.\nNOTES:\n[1] Please note that as a result of using more accurate haar gradients, the similarity value\nreturned may be slightly different from the one obtained from the original code.\n[2] The original code limits the gradient computation to 3 scales only. This is the case here\ntoo. But the code generalizes to a greater number of scales too.\n[3] For a rather weak reason (viewing scale), in the original code, every input image is\ndownsampled by 2 in both dimensions. 
This is mimicked in this code.\n--------------------------\n24 August 2020\n(c) Radhakrishna Achanta\n--------------------------\n'''\n\n# Haar Perceptual Similarity Index\ndef metric_HaarPSI(original, modified):\n original, modified = assert_sizeImages(original,modified)\n \n def subsample(mat):\n\n mat = mat.astype(np.float64)\n out = (mat[0:-1, 0:-1,...] + mat[1:, 1:,...] + mat[1:, 0:-1,...] + mat[0:-1, 1:,...])/4\n return out[::2,::2,...]\n\n def RGB2YIQ(rgb):\n Y = 0.299 * rgb[:, :, 0] + 0.587 * rgb[:, :, 1] + 0.114 * rgb[:, :, 2]\n I = 0.596 * rgb[:, :, 0] - 0.274 * rgb[:, :, 1] - 0.322 * rgb[:, :, 2]\n Q = 0.211 * rgb[:, :, 0] - 0.523 * rgb[:, :, 1] + 0.312 * rgb[:, :, 2]\n\n return Y,I,Q\n\n def compute_haar_gradients(x,scales=3,doavg=True):\n\n grady = np.zeros((scales,)+x.shape)\n gradx = np.zeros((scales,)+x.shape)\n\n for s in range(scales):\n\n x2 = (x[:,:-1] + x[:,1:])*0.5 # average along rows\n grady[s,:-1,:-1] = x2[:-1,:] - x2[1:,:] # compute vertical gradients\n \n y2 = (x[:-1,:] + x[1:,:])*0.5 # average along columns\n gradx[s,:-1,:-1] = y2[:,:-1] - y2[:,1:] # compute horizontal gradients\n\n x[:-1,:-1] = (x2[:-1,:] + y2[:,:-1]) # average and reassign to x\n x = x*0.5\n\n return np.concatenate((grady, gradx),axis=0)\n\n def compute_avg(inp):\n\n out = np.zeros(inp.shape)\n out[:-1,:-1] = (inp[0:-1, 0:-1] + inp[1:, 1:] + inp[1:, 0:-1] + inp[0:-1, 1:])/4\n return out\n\n def compute_weights(coeff_refy, coeff_imgy):\n # Take the maxmimum between the absolute value of the gradients of reference and distorted images\n # for the coarsest level gradients\n v,h = scales-1, scales+scales-1\n wts_vert = np.maximum(np.abs(coeff_refy[v]),np.abs(coeff_imgy[v])) # coarsest vertical gradients\n wts_hori = np.maximum(np.abs(coeff_refy[h]),np.abs(coeff_imgy[h])) # coarsest horizontal gradients\n # wts_hv = (wts_hori+wts_vert)/2\n return wts_hori, wts_vert\n\n def compute_local_similarities_Y(coeff_refy, coeff_imgy):\n # Collect the absolute value of all the fine gradients for the reference image\n mag_ref_vert = np.abs(np.stack([coeff_refy[i] for i in range(scales-1)]))\n mag_ref_hori = np.abs(np.stack([coeff_refy[i+scales] for i in range(scales-1)]))\n\n # Collect the absolute value of all the fine gradents for the distorted image \n mag_img_vert = np.abs(np.stack([coeff_imgy[i] for i in range(scales-1)]))\n mag_img_hori = np.abs(np.stack([coeff_imgy[i+scales] for i in range(scales-1)]))\n\n # Compute the normalized correlation of the gradient magnitudes at the finest level\n local_sim_vert = np.sum((2 * mag_ref_vert * mag_img_vert + C)/(mag_ref_vert**2 + mag_img_vert**2 + C),axis=0)/2 # vertical\n local_sim_hori = np.sum((2 * mag_ref_hori * mag_img_hori + C)/(mag_ref_hori**2 + mag_img_hori**2 + C),axis=0)/2 # horizontal\n\n return local_sim_hori, local_sim_vert\n\n def compute_local_similarities_IQ(coeff_refi, coeff_refq, coeff_imgi, coeff_imgq):\n\n similarity_i = (2 * coeff_refi * coeff_imgi + C) / (coeff_refi**2 + coeff_imgi**2 + C)\n similarity_q = (2 * coeff_refq * coeff_imgq + C) / (coeff_refq**2 + coeff_imgq**2 + C)\n local_sim_iq = (similarity_i + similarity_q)/2\n\n return local_sim_iq\n\n\n # sigmoid function scaed by alpha\n def sigmoid(value, alpha):\n return 1.0 / (1.0 + np.exp(-alpha * value))\n\n # the inverse of the sigmoid function (i.e recovering x from sigmoid values)\n def logit(value, alpha):\n return np.log(value/(1 - value)) / alpha\n\n #----------------------------------\n # The main function.\n # Expected image shape is H,W (gray) or H,W,C (color) for the 
reference (original) and distorted (modified) images\n #----------------------------------\n def compute_similarity(original,modified):\n \n color_image = (3 == len(modified.shape)) # expected image shape is H,W (gray) or H,W,C (color)\n\n refy = subsample(original)\n imgy = subsample(modified)\n\n if color_image:\n refy,refi,refq = RGB2YIQ(refy.astype(np.float64))\n imgy,imgi,imgq = RGB2YIQ(imgy.astype(np.float64))\n\n coeff_refy = compute_haar_gradients(refy,scales,True)\n coeff_imgy = compute_haar_gradients(imgy,scales,True)\n\n wts_hori, wts_vert = compute_weights(coeff_refy, coeff_imgy)\n sim_yhori, sim_yvert = compute_local_similarities_Y(coeff_refy, coeff_imgy)\n\n weights = np.stack((wts_hori, wts_vert))\n local_similarities = np.stack((sim_yhori, sim_yvert))\n\n if color_image:\n # compute one additional term for weights and local_similarities in case of color images\n coeff_refi = np.abs(compute_avg(refi))\n coeff_refq = np.abs(compute_avg(refq))\n coeff_imgi = np.abs(compute_avg(imgi))\n coeff_imgq = np.abs(compute_avg(imgq))\n\n sim_iq = compute_local_similarities_IQ(coeff_refi, coeff_refq, coeff_imgi, coeff_imgq)\n\n weights = np.stack((wts_hori, wts_vert, (wts_hori+wts_vert)/2))\n local_similarities = np.stack((sim_yhori, sim_yvert, sim_iq))\n\n \n similarity = logit(np.sum(sigmoid(local_similarities[:], alpha) * weights[:]) / np.sum(weights[:]), alpha)**2\n\n return similarity\n\n\n if original.shape != modified.shape:\n raise ValueError(\"The shapes of the reference image and the distorted image do not match.\")\n #----------------------------------\n # Constants for the whole function\n #----------------------------------\n C = 30.0 # experimentally determined constant\n alpha = 4.2 # experimentally determined constant\n scales = 4\n\n return compute_similarity(original,modified)","repo_name":"Flare00/Safe-Eye","sub_path":"src/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":9169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
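A quick smoke test of the metrics above on synthetic data (the shape, seed, and perturbation are chosen arbitrarily; assumes the functions above are importable from this module):

import numpy as np

rng = np.random.default_rng(0)
a = rng.integers(0, 256, size=(64, 64, 3)).astype(np.uint8)
b = a.copy()
b[0, 0, 0] ^= 1  # flip one low bit so the images differ slightly

print(metric_MSE(a, b))      # tiny but non-zero
print(metric_PSNR(a, b))     # large and finite; identical images return np.inf
print(metric_HaarPSI(a, b))  # close to 1.0 for near-identical images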
+{"seq_id":"24623339326","text":"import streamlit as st\r\nimport siqr\r\nimport sird\r\n\r\nst.beta_set_page_config(\r\n page_title=\"Modelos Biomatemáticos\",\r\n \tlayout=\"centered\",\r\n \tinitial_sidebar_state=\"expanded\",\r\n)\r\n\r\nmodel = st.sidebar.selectbox('Seleccionar modelo', ['SIR-D','SIQR'])\r\n\r\nif model == 'SIQR':\r\n siqr.main()\r\n\r\nif model == 'SIR-D':\r\n sird.main()","repo_name":"joaquin-silva/modelos-biomatematicos","sub_path":"modelos.py","file_name":"modelos.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70209269032","text":"import numpy as np\nimport sys\n\n# 3D laplace problem\n\nnx = 1\nny = 1\nnz = 4\n\nlocal = True\n\n# Dirichlet boundary conditions\nbc = {}\n\nif not local:\n for j in range(int(ny+1)):\n for i in range(int(nx+1)):\n # x = 0 plane\n x = i/nx\n y = j/ny\n index = int(j*(nx+1) + i)\n \n # z- plane (bottom)\n bc[index] = np.sin(x*np.pi)\n bc[index] = 1.0\n \n # z+ plane (top)\n index += int(nz*(nx+1)*(ny+1))\n bc[index] = np.sin(y*np.pi) + 2.0\n bc[index] = 2.0\n\nrank_no = (int)(sys.argv[-2])\n\nn_elements = [1,1,4]\nif local:\n n_elements = [1,1,1]\n if rank_no == 0:\n n_elements = [1,1,2]\n\n # boundary conditions\n bc = {}\n if rank_no == 0:\n bc = {dof:1.0 for dof in range(4)}\n elif rank_no == 2:\n bc = {-1-dof:2.0 for dof in range(4)}\n\nconfig = {\n \"logFormat\": \"csv\", # \"csv\" or \"json\", format of the lines in the log file, csv gives smaller files\n \"solverStructureDiagramFile\": \"solver_structure.txt\", # output file of a diagram that shows data connection between solvers\n \"FiniteElementMethod\" : {\n \"nElements\": n_elements,\n \"nRanks\": [1,1,1],\n \"inputMeshIsGlobal\": not local,\n \"physicalExtent\": [1.0, 1.0, 3.0],\n \"outputInterval\": 1.0,\n \n \"dirichletBoundaryConditions\": bc,\n \"dirichletOutputFilename\": None, # filename for a vtp file that contains the Dirichlet boundary condition nodes and their values, set to None to disable\n \"neumannBoundaryConditions\": [],\n \"prefactor\": 1,\n \n \"solverType\": \"gmres\",\n \"preconditionerType\": \"none\",\n \"relativeTolerance\": 1e-15,\n \"absoluteTolerance\": 1e-10, # 1e-10 absolute tolerance of the residual \n \"maxIterations\": 10000,\n \"dumpFormat\": \"default\",\n \"dumpFilename\": \"\",\n \n \"OutputWriter\" : [\n {\"format\": \"Paraview\", \"outputInterval\": 1, \"filename\": \"out/laplace\", \"binary\": False, \"fixedFormat\": False, \"onlyNodalValues\":True, \"combineFiles\":True, \"fileNumbering\": \"incremental\"}, \n {\"format\": \"PythonFile\", \"filename\": \"out/laplace\", \"outputInterval\": 1, \"binary\":False, \"onlyNodalValues\":True, \"fileNumbering\": \"incremental\"}\n ]\n },\n}\n","repo_name":"maierbn/opendihu","sub_path":"examples/laplace/laplace3d/settings_dirichlet.py","file_name":"settings_dirichlet.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"}
+{"seq_id":"14154526357","text":"import json\nimport os\nimport datetime\nimport pandas as pd\nimport numpy as np\n\nimport gzip\nimport sys\nif sys.version_info[0] < 3:\n from StringIO import StringIO\nelse:\n from io import StringIO\n\n\n\n# Helper functions\ndef extract_ip(x):\n if 'Source' in x.keys():\n ip = x['Source'][0].get('IP4', [''])[0]\n if ip is '':\n ip = x['Source'][0].get('IP6', [''])[0]\n if ip is '':\n raise ValueError('Failed to extract IP address')\n else:\n raise ValueError('Failed to access Source record while extracting IP address')\n return ip\n\n\ndef extract_time(x): #window):\n time_marked = x.get('EventTime', '')\n if time_marked is '':\n time_marked = x.get('DetectTime', '')\n if time_marked is '':\n raise ValueError('Failed to extract EventTime or DetectTime timestamp')\n\n fmt = '%Y-%m-%d'\n\n if 'T' in time_marked:\n fmt = fmt+'T'\n else:\n fmt = fmt+' '\n\n fmt = fmt+'%H:%M:%S'\n if '.' in time_marked:\n fmt = fmt+'.%f'\n fmt = fmt+'%z'\n\n if time_marked[-1]=='Z':\n time_marked = time_marked.rstrip('Z')+'+0000'\n\n try:\n timestamp = int(datetime.datetime.strptime(time_marked, fmt).timestamp())\n except ValueError:\n raise\n\n #if window['min'] < 0:\n # window['min'] = timestamp\n #elif timestamp < window['min']:\n # window['min'] = timestamp\n #if window['max'] < timestamp:\n # window['max'] = timestamp\n\n return timestamp\n\n\ndef get_series(evtsAt, length):\n length = np.int(length)\n vector = np.zeros(length, dtype=np.double)\n np.add.at(vector, np.array(evtsAt).astype(np.int), 1)\n #return pd.SparseArray(s, fill_value=0)\n return vector\n\n\ndef get_bin_series(evtsAt, length):\n length = np.int(length)\n vector = np.zeros(length, dtype=np.double)\n np.add.at(vector, np.array(evtsAt).astype(np.int), 1)\n #return pd.SparseArray(s, fill_value=0)\n return vector > 0\n\n#Could be done better\ndef count_blocks(lst):\n last_val = lst[0]\n sum_blocks = 0\n for x in lst:\n if x > 0 and x != last_val:\n sum_blocks += 1\n last_val = x\n return sum_blocks\n\n\n#Preprocess to time series of events, no features\ndef preprocess(file_path, silent=True):\n csv_str = \"ip,timestamp,origin,type,line\\n\" #line is a bit misleading\n\n evt_types = {}\n origins = {}\n\n origins_n = -1\n evt_types_n = -1\n\n proc = 0\n line_num = 0\n\n signs={}\n openf = open\n\n if file_path[-3:] == '.gz':\n import gzip\n openf = gzip.open\n\n with openf(file_path, 'r') as data:\n lst = 0\n curr = 0\n line = data.readline()\n while line:\n #To build line index for full event retrieval, read event with readline()\n curr = lst\n lst = data.tell()\n\n line_num = line_num + 1\n x = json.loads(line)\n\n try:\n #name = str(x['Node'][0]['Name'])\n name = str(x.get('Node', ['None']))\n category = str(x.get('Category', ['None']))\n\n #val = signs.get(str(x['Node']), 0)\n #signs[str(x['Node'])] = val+1\n\n origin = origins.get(name, origins_n+1)\n if origin > origins_n:\n origins[name] = origins_n+1\n origins_n = origins_n+1\n\n evt_type = evt_types.get(category, evt_types_n+1)\n if evt_type > evt_types_n:\n evt_types[category] = evt_types_n+1\n evt_types_n = evt_types_n+1\n\n ip = extract_ip(x)\n\n timestamp = extract_time(x)\n\n csv_str += f\"{ip},{timestamp},{origin},{evt_type},{curr}\\n\"#.format(ip, timestamp, origin, evt_type, curr)\n\n proc += 1\n\n except ValueError as err:\n if not silent:\n print(err, end=', ')\n print('while processing line {}'.format(line_num))\n pass\n finally:\n #if linenu > 1000: break\n pass\n\n line = data.readline()\n\n # with open('./data/Nodes.txt', 'w') as f:\n 
# for key, val in signs.items():\n # print('{}:{}'.format(key,val), file=f)\n\n res = pd.read_csv(StringIO(csv_str)) # it is faster the appending to data frame :)\n\n dt_origin = pd.CategoricalDtype(list(origins.keys()), ordered=True)\n dt_type = pd.CategoricalDtype(list(evt_types.keys()), ordered=True)\n\n res['origin'] = pd.Series(pd.Categorical.from_codes(codes=res['origin'].values, dtype=dt_origin))\n res['type'] = pd.Series(pd.Categorical.from_codes(codes=res['type'].values, dtype=dt_type))\n\n #if not silent:\n print('Processed {} % of events'.format(100 * proc / line_num))\n\n return res\n\ndef run_prep(file_path, prep_storage_dir):\n # file_path = './data/yyyy-mm-dd.idea'\n # where to store\n\n df = preprocess(file_path)\n\n path_str_list = os.path.split(file_path)\n file_name, suffix = path_str_list[1].split('.')\n\n file_path = prep_storage_dir + '/' + file_name\n df.to_pickle(file_path + '.pcl')\n\n return df\n\n\nif __name__ == '__main__':\n\n src = sys.argv[1]\n dst = sys.argv[2]\n if dst == '':\n dst = './data'\n\n working_dir = os.fsencode(src)\n\n for file in os.listdir(working_dir):\n file_name = os.fsdecode(file)\n\n if file_name.endswith(\".gz\") or file_name.endswith(\".idea\"):\n file_path = os.path.join(os.fsdecode(working_dir), file_name)\n res = run_prep(file_path, dst)\n else:\n continue\n","repo_name":"CESNET/SECT","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
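extract_time assembles its strptime format string piece by piece; for a typical ISO-style timestamp the assembled parse reduces to this (the sample value is invented):

import datetime

time_marked = '2019-03-01T12:30:45.123456Z'
time_marked = time_marked.rstrip('Z') + '+0000'  # same Z -> +0000 rewrite as above
fmt = '%Y-%m-%dT%H:%M:%S.%f%z'
ts = int(datetime.datetime.strptime(time_marked, fmt).timestamp())
print(ts)  # seconds since the epoch, in UTC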
+{"seq_id":"42778560713","text":"from typing import List, Dict, Callable\nfrom collections import Counter\nimport math\nimport matplotlib.pyplot as plt\n\ndef bucketize(point: float, bucket_size: float) -> float:\n\treturn bucket_size * math.floor(point / bucket_size)\n\ndef make_histogram(points: List[float], bucket_size: float) -> Dict[float, int]:\n\treturn Counter(bucketize(point, bucket_size) for point in points)\n\ndef plot_histogram(points: List[float], bucket_size: float, title: str = ''):\n\thistogram = make_histogram(points, bucket_size)\n\tplt.bar(list(histogram.keys()), list(histogram.values()), width = bucket_size)\n\tplt.title(title)\n\tplt.show()\n\nimport random\n\ndef normal_cdf(x: float, mu: float = 0, sigma: float = 1) -> float:\n\treturn (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2\n\ndef inverse_normal_cdf(p: float, mu: float = 0, sigma: float = 1,\n\t\t\t\t\t tolerance: float = 0.00001) -> float:\n\tif mu != 0 or sigma != 1:\n\t\treturn mu + sigma * inverse_normal_cdf(p, tolerance = tolerance)\n\tlow_z = -10\n\thi_z = 10\n\twhile hi_z - low_z > tolerance:\n\t\tmid_z = (low_z + hi_z) / 2\n\t\tmid_p = normal_cdf(mid_z)\n\t\tif mid_p < p:\n\t\t\tlow_z = mid_z\n\t\telse:\n\t\t\thi_z = mid_z\n\treturn mid_z\n\nrandom.seed(0)\n\nuniform = [200 * random.random() - 100 for _ in range(10000)]\nnormal = [57 * inverse_normal_cdf(random.random()) for _ in range(10000)]\n\nplot_histogram(uniform, 10, 'Равномерная гистограмма')\nplot_histogram(normal, 10, 'Гистограмма нормального распределения')\n\ndef random_normal():\n\treturn inverse_normal_cdf(random.random())\n\nxs = [random_normal() for _ in range(1000)]\nys1 = [x + random_normal() / 2 for x in xs]\nys2 = [-x + random_normal() / 2 for x in xs]\n\nplt.scatter(xs, ys1, marker = '.', color = 'black', label = 'ys1')\nplt.scatter(xs, ys2, marker = '.', color = 'gray', label = 'ys2')\nplt.xlabel('xs')\nplt.ylabel('ys')\nplt.legend(loc = 9)\nplt.title('Совсем разные совместные распределения')\nplt.show()\n\ndef de_mean(xs: List[float]) -> List[float]:\n\tx_bar = mean(xs)\n\treturn [x - x_bar for x in xs]\n\ndef variance(xs: List[float]) -> float:\n\tassert len(xs) >= 2, 'Дисперсия требует наличия не менее двух элементов'\n\tn = len(xs)\n\tdeviations = de_mean(xs)\n\tsum_of_squares = sum([d ** 2 for d in deviations])\n\treturn sum_of_squares / (n - 1)\n\ndef standard_deviation(xs: List[float]) -> float:\n\treturn math.sqrt(variance(xs))\n\ndef covariance(xs: List[float], ys: List[float]) -> float:\n\tassert len(xs) == len(ys), 'xs и ys должны иметь одинаковое число элементов'\n\tmean_xs = mean(xs)\n\tmean_ys = mean(ys)\n\tcov = sum([(x_i - mean_xs) * (y_i - mean_ys)\n\t\t\t for x_i, y_i in zip(xs, ys)]) / (len(xs) - 1)\n\treturn cov\n\ndef correlation(xs: List[float], ys: List[float]) -> float:\n\tstdev_x = standard_deviation(xs)\n\tstdev_y = standard_deviation(ys)\n\tif stdev_x > 0 and stdev_y > 0:\n\t\treturn covariance(xs, ys) / stdev_x / stdev_y\n\telse:\n\t\treturn 0\n\nVector = List[int]\nMatrix = List[List[float]]\n\ndef make_matrix(num_rows: int, num_cols: int,\n\t\t\t\tentry_fn: Callable[[int, int], float]) -> Matrix:\n\treturn [[entry_fn(i, j)\n\t\t\t for j in range(num_cols)]\n\t\t\t for i in range(num_rows)]\n\ndef correlation_matrix(data: Matrix) -> Matrix:\n\tdef correlation_ij(i: int, j: int) -> float:\n\t\treturn correlation(data[i], data[j])\n\treturn make_matrix(len(data), len(data), correlation_ij)\n\ndef random_row() -> List[float]:\n\trow = [0.0, 0, 0, 0]\n\trow[0] = random_normal()\n\trow[1] = -5 * row[0] 
+ random_normal()\n\trow[2] = row[0] + row[1] + 5 * random_normal()\n\trow[3] = 6 if row[2] > -2 else 0\n\treturn row\n\nnum_points = 100\ncorr_rows = [random_row() for _ in range(num_points)]\ncorr_data = [list(col) for col in zip(*corr_rows)]\nnum_vectors = len(corr_data)\nfig, ax = plt.subplots(num_vectors, num_vectors)\n\nfor i in range(num_vectors):\n\tfor j in range(num_vectors):\n\t\tif i != j:\n\t\t\tax[i][j].scatter(corr_data[j], corr_data[i])\n\t\telse:\n\t\t\tax[i][j].annotate('Серия' + str(i), (0.5, 0.5),\n\t\t\t\t\t\t\t xycoords = 'axes fraction',\n\t\t\t\t\t\t\t ha = 'center', va = 'center')\n\t\tif i < num_vectors - 1:\n\t\t\tax[i][j].xaxis.set_visible(False)\n\t\tif j > 0: ax[i][j].yaxis.set_visible(False)\n\nax[-1][-1].set_xlim(ax[0][-1].get_xlim())\nax[0][0].set_ylim(ax[0][1].get_ylim())\nplt.show()\n\nimport datetime\n\nstock_price = {'closing_price': 102.06,\n 'date': datetime.date(2014, 8, 29),\n 'symbol': 'AAPL'}\n\nfrom collections import namedtuple\n\nStockPrice = namedtuple('StockPrice', ['symbol', 'date', 'closing_price'])\nprice = StockPrice('MSFT', datetime.date(2018, 12, 14), 106.03)\n\nassert price.symbol == 'MSFT'\nassert price.closing_price == 106.03\n\nfrom typing import NamedTuple\n\nclass StockPrice1(NamedTuple):\n\tsymbol: str\n\tdate: datetime.date\n\tclosing_price: float\n\n\tdef is_high_tech(self) -> bool:\n\t\treturn self.symbol in ['MSFT', 'GOOG', 'FB', 'AMZN', 'AAPL']\n\nprice1 = StockPrice1('MSFT', datetime.date(2018, 12, 14), 106.03)\n\nassert price1.symbol == 'MSFT'\nassert price1.closing_price == 106.03\nassert price1.is_high_tech()\n\nfrom dataclasses import dataclass\n\n@dataclass\nclass StockPrice2:\n\tsymbol: str\n\tdate: datetime.date\n\tclosing_price: float\n\n\tdef is_high_tech(self) -> bool:\n\t\treturn self.symbol in ['MSFT', 'GOOG', 'FB', 'AMZN', 'AAPL']\n\nprice2 = StockPrice2('MSFT', datetime.date(2018, 12, 14), 106.03)\n\nassert price2.symbol == 'MSFT'\nassert price2.closing_price == 106.03\nassert price2.is_high_tech()\n\nprice2.closing_price /= 2\nassert price2.closing_price == 53.015\n\nfrom dateutil.parser import parse\n\ndef parse_row(row: List[str]) -> StockPrice:\n\tsymbol, date, closing_price = row\n\treturn StockPrice(symbol = symbol,\n\t\t\t\t\t date = parse(date).date(),\n\t\t\t\t\t closing_price = float(closing_price))\n\nstock = parse_row(['MSFT', '2018-12-14', '106.03'])\n\nassert stock.symbol == 'MSFT'\nassert stock.date == datetime.date(2018, 12, 14)\nassert stock.closing_price == 106.03\n\nfrom typing import Optional\nimport re\n\ndef try_parse_row(row: List[str]) -> Optional[StockPrice]:\n\tsymbol, date_, closing_price_ = row\n\tif not re.match(r'^[A-Z]+$', symbol):\n\t\treturn None\n\ttry:\n\t\tdate = parse(date_).date()\n\texcept ValueError:\n\t\treturn None\n\ttry:\n\t\tclosing_price = float(closing_price_)\n\texcept ValueError:\n\t\treturn None\n\treturn StockPrice(symbol, date, closing_price)\n\nassert try_parse_row(['MSFT0', '2018-12-14', '106.03']) is None\nassert try_parse_row(['MSFT', '2018-12--14', '106.03']) is None\nassert try_parse_row(['MSFT', '2018-12-14', 'x']) is None\nassert try_parse_row(['MSFT', '2018-12-14', '106.03']) == stock\n\ndef subtract(v: Vector, w: Vector) -> Vector:\n\tassert len(v) == len(w), 'Векторы должны иметь одинаковую длину'\n\treturn [v_i - w_i for v_i, w_i in zip(v, w)]\n\ndef dot(v: Vector, w: Vector) -> float:\n\tassert len(v) == len(w)\n\treturn sum(v_i * w_i for v_i, w_i in zip(v, w))\n\ndef sum_of_squares(v: Vector) -> float:\n\treturn dot(v, v)\n\ndef 
squared_distance(v: Vector, w: Vector) -> float:\n\treturn sum_of_squares(subtract(v, w))\n\ndef distance(v: Vector, w: Vector) -> float:\n\treturn math.sqrt(squared_distance(v, w))\n\na_to_b = distance([63, 150], [67, 160])\na_to_c = distance([63, 150], [70, 171])\nb_to_c = distance([67, 160], [70, 171])\nprint(a_to_b, a_to_c, b_to_c)\n\na_to_b = distance([160, 150], [170.2, 160])\na_to_c = distance([160, 150], [177.8, 171])\nb_to_c = distance([170.2, 160], [177.8, 171])\nprint(a_to_b, a_to_c, b_to_c)\n\nfrom typing import Tuple\n\ndef scalar_multiply(c: float, v: Vector) -> Vector:\n\treturn [c * v_i for v_i in v]\n\ndef vector_sum(vectors: List[Vector]) -> Vector:\n\tassert vectors, 'Векторы не предоставлены!'\n\tnum_elements = len(vectors[0])\n\tassert all(len(v) == num_elements for v in vectors), 'Разные размеры!'\n\treturn [sum(vector[i] for vector in vectors)\n\t\t\tfor i in range(num_elements)]\n\ndef vector_mean(vectors: List[Vector]) -> Vector:\n\tn = len(vectors)\n\treturn scalar_multiply(1 / n, vector_sum(vectors))\n\ndef mean(xs: List[float]) -> float:\n\treturn sum(xs) / len(xs)\n\ndef de_mean(xs: List[float]) -> List[float]:\n\tx_bar = mean(xs)\n\treturn [x - x_bar for x in xs]\n\ndef variance(xs: List[float]) -> float:\n\tassert len(xs) >= 2, 'Дисперсия требует наличия не менее двух элементов'\n\tn = len(xs)\n\tdeviations = de_mean(xs)\n\tsum_of_squares = sum([d ** 2 for d in deviations])\n\treturn sum_of_squares / (n - 1)\n\ndef standard_deviation(xs: List[float]) -> float:\n\treturn math.sqrt(variance(xs))\n\ndef magnitude(v: Vector) -> float:\n\treturn math.sqrt(sum_of_squares(v))\n\ndef scale(data: List[Vector]) -> Tuple[Vector, Vector]:\n\tdim = len(data[0])\n\tmeans = vector_mean(data)\n\tstdevs = [standard_deviation([vector[i] for vector in data])\n\t\t\t for i in range(dim)]\n\treturn means, stdevs\n\nvectors = [[-3, -1, 1], [-1, 0, 1], [1, 1, 1]]\nmeans, stdevs = scale(vectors)\nassert means == [-1, 0, 1]\nassert stdevs == [2, 1, 0]\n\ndef rescale(data: List[Vector]) -> List[Vector]:\n\tdim = len(data[0])\n\tmeans, stdevs = scale(data)\n\trescaled = [v[:] for v in data]\n\tfor v in rescaled:\n\t\tfor i in range(dim):\n\t\t\tif stdevs[i] > 0:\n\t\t\t\tv[i] = (v[i] - means[i]) / stdevs[i]\n\treturn rescaled\n\nmeans, stdevs = scale(rescale(vectors))\nassert means == [0, 0, 1]\nassert stdevs == [1, 1, 0]\n\nimport tqdm\n\nfor i in tqdm.tqdm(range(100)):\n\t_ = [random.random() for _ in range(100000)]\n\ndef primes_up_to(n: int) -> List[int]:\n\tprimes = [2]\n\twith tqdm.trange(3, n) as t:\n\t\tfor i in t:\n\t\t\ti_is_prime = not any(i % p == 0 for p in primes)\n\t\t\tif i_is_prime:\n\t\t\t\tprimes.append(i)\n\t\t\tt.set_description(f'{len(primes)} простых')\n\treturn primes\n\nmy_primes = primes_up_to(100)\n\ndef de_mean(data: List[Vector]) -> List[Vector]:\n\tmean = vector_mean(data)\n\treturn [subtract(vector, mean) for vector in data]\n\ndef direction(w: Vector) -> Vector:\n\tmag = magnitude(w)\n\treturn [w_i / mag for w_i in w]\n\ndef directional_varience(data: List[Vector], w: Vector) -> float:\n\tw_dir = direction(w)\n\treturn sum(dot(v, w_dir) ** 2 for v in data)\n\ndef directional_varience_gradient(data: List[Vector], w: Vector) -> Vector:\n\tw_dir = direction(w)\n\treturn [sum(2 * dot(v, w_dir) * v[i] for v in data)\n\t\t\tfor i in range(len(w))]\n\ndef add(v: Vector, w: Vector) -> Vector:\n\tassert len(v) == len(w), 'Векторы должны иметь одинаковую длину'\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]\n\ndef gradient_step(v: Vector, gradient: Vector, 
step_size: float) -> Vector:\n\tassert len(v) == len(gradient)\n\tstep = scalar_multiply(step_size, gradient)\n\treturn add(v, step)\n\ndef first_principal_component(data: List[Vector],\n\t\t\t\t\t\t\t n: int = 100,\n\t\t\t\t\t\t\t step_size: float = 0.1) -> Vector:\n\tguess = [1.0 for _ in data[0]]\n\twith tqdm.trange(n) as t:\n\t\tfor _ in t:\n\t\t\tdv = directional_varience(data, guess)\n\t\t\tgradient = directional_varience_gradient(data, guess)\n\t\t\tguess = gradient_step(guess, gradient, step_size)\n\t\t\tt.set_description(f'dv: {dv:.3f}')\n\treturn direction(guess)\n\ndef project(v: Vector, w: Vector) -> Vector:\n\tprojection_length = dot(v, w)\n\treturn scalar_multiply(projection_length, w)\n\ndef remove_projection_from_vector(v: Vector, w: Vector) -> Vector:\n\treturn subtract(v, project(v, w))\n\ndef remove_projection(data: List[Vector], w: Vector) -> List[Vector]:\n\treturn [remove_projection_from_vector(v, w) for v in data]\n\ndef pca(data: List[Vector], num_components: int) -> List[Vector]:\n\tcomponents: List[Vector] = []\n\tfor _ in range(num_components):\n\t\tcomponent = first_principal_component(data)\n\t\tcomponents.append(component)\n\t\tdata = remove_projection(data, component)\n\treturn components\n\ndef transform_vector(v: Vector, components: List[Vector]) -> Vector:\n\treturn [dot(v, w) for w in components]\n\ndef transform(data: List[Vector], components: List[Vector]) -> List[Vector]:\n\treturn [transform_vector(v, components) for v in data]\n","repo_name":"MikhailSukhanov/DS_projects","sub_path":"DS_topics_Python_implementation/Working_with_data.py","file_name":"Working_with_data.py","file_ext":"py","file_size_in_byte":11797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
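The closing pca/transform pair reduces centered data to its leading directions; a minimal run on toy 2-D points (values arbitrary, assumes the definitions above are in scope):

data = [[2.0, 1.1], [-1.0, -0.4], [3.0, 1.4], [-2.0, -1.2]]
data = de_mean(data)                      # center first, as the gradient ascent assumes
components = pca(data, num_components=1)
reduced = transform(data, components)
print(components[0])  # unit vector along the dominant direction
print(reduced)        # one coordinate per point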
+{"seq_id":"21904085398","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom collections import defaultdict\nimport numpy as np\n# import numpy as np\n\nclass Topology():\n def __init__(self, name='Topological analysis', loc_err=False):\n self.topology = nx.DiGraph()\n self.name = name\n self.loc_err = loc_err\n\n def load_csv(self, csv_path):\n self.df = pd.read_csv(csv_path, index_col=0)\n\n def set_topology(self, topo):\n self.topology = topo\n\n # set edge color based on num invo weight\n def set_edge_colors(self):\n def normalize(x, max_invo, min_invo):\n max_range = 1.0\n min_range = 0.2\n return (max_range - min_range) * (x - min_invo) / (max_invo - min_invo) + min_range\n\n invos_dict = nx.get_edge_attributes(self.topology, 'num_invo')\n invos = list(invos_dict.values())\n max_invo = max(invos)\n min_invo = min(invos)\n return list(map(lambda x: cm.Blues(normalize(x, max_invo, min_invo)), invos))\n\n def set_node_colors_and_label(self, reg_node_color):\n node_color = []\n node_label_dict = {}\n for node in self.topology.nodes(data=True):\n if 'rank' in node[1]:\n node_color.append(cm.YlOrRd(float(node[1]['rank']/8)))\n else: \n node_color.append(reg_node_color)\n node_label_dict[node[0]] = node[1]['label']\n\n return node_color, node_label_dict\n\n def generate_topology(self, df: pd.DataFrame, row_labels: set, exclude=False, exclude_label='node', loc_err_conf={}):\n self.df = df\n self.exclude = exclude\n self.exclude_label = exclude_label\n self.row_labels = row_labels\n edge_attr_dict = defaultdict(dict)\n node_attr_dict = defaultdict(dict)\n\n # Check if predict column exists on the dataframe\n if self.loc_err and 'predict' not in self.df.columns:\n self.loc_err = False\n\n if self.loc_err and not loc_err_conf:\n raise Exception(\"Must provide loc_err_conf for Topology instance with loc_err enabled\")\n \n for _, row in self.df.iterrows():\n # filter out some nodes\n if not self.exclude:\n if (self.exclude_label in row[self.row_labels[0]] or self.exclude_label in row[self.row_labels[1]]):\n continue\n # generate DAG\n self.topology.add_edge(row[self.row_labels[0]], row[self.row_labels[1]])\n # edge attribute: number of invocation for the edge\n if 'num_invo' not in edge_attr_dict[(row[self.row_labels[0]], row[self.row_labels[1]])]:\n edge_attr_dict[(row[self.row_labels[0]], row[self.row_labels[1]])]['num_invo'] = 0\n edge_attr_dict[(row[self.row_labels[0]], row[self.row_labels[1]])]['num_invo'] += 1\n if self.loc_err and row['predict']:\n # edge attribute: boolean to indicate if the invocation was detected as anomalous\n edge_attr_dict[(row[self.row_labels[0]], row[self.row_labels[1]])]['anomalous'] = True\n\n\n if 'selected_features' in loc_err_conf:\n for k, v in loc_err_conf['selected_features'].items():\n edge_attr_dict = dict(edge_attr_dict)\n # edge attribute: features that were selected for anomaly detection of the edge\n edge_attr_dict[k]['selected_features'] = v\n nx.set_edge_attributes(self.topology, edge_attr_dict)\n\n # Set node fact\n for node in self.topology.nodes():\n node_attr_dict[node]['label'] = node\n\n if 'root_cause' in loc_err_conf:\n for rc in loc_err_conf['root_cause']:\n node_attr_dict[rc]['label'] += '*(RC)*'\n if 'predictions' in loc_err_conf:\n for rank, prediction in enumerate(loc_err_conf['predictions'], 1):\n node_attr_dict[prediction]['rank'] = rank\n node_attr_dict[prediction]['label'] += f'~[{rank}]'\n\n 
nx.set_node_attributes(self.topology, node_attr_dict)\n\n def pagerank(self):\n self.reversed_topology = self.topology.reverse(copy=True)\n pr = nx.pagerank(self.reversed_topology) \n self.pr = {k: v for k, v in sorted(pr.items(), key=lambda item: item[1], reverse=True)}\n return self.pr\n\n def get_io_egdes(self):\n def format_node(node):\n in_edges = list(self.topology.in_edges(node, data=True))\n out_edges = list(self.topology.out_edges(node, data=True))\n num_in = len(in_edges)\n num_out = len(out_edges)\n\n invo_in = []\n anomalies_in = 0\n for in_e in in_edges:\n invo_in.append(in_e[2]['num_invo'] )\n if 'anomalous' in in_e[2]:\n anomalies_in += 1\n\n invo_out = [] \n anomalies_out = 0\n for out_e in out_edges:\n invo_out.append(out_e[2]['num_invo']) \n if 'anomalous' in out_e[2]:\n anomalies_out += 1\n\n in_avg, in_var = calc_avg_and_var(invo_in)\n out_avg, out_var = calc_avg_and_var(invo_out)\n return_dict = {\n 'in_edges': in_edges,\n 'out_edges': out_edges,\n 'num_in': num_in,\n 'num_out': num_out,\n 'num_out-in': num_out - num_in,\n 'num_invo-in': sum(invo_in),\n 'num_invo-out': sum(invo_out),\n 'num_invo-in-avg': in_avg,\n 'num_invo-out-avg': out_avg,\n 'num_invo-in-var': in_var,\n 'num_invo-out-var': out_var,\n } \n\n if self.loc_err:\n return_dict['num_anomalous_in'] = anomalies_in\n return_dict['num_anomalous_out'] = anomalies_out\n\n return return_dict\n\n def calc_avg_and_var(invo_list):\n if len(invo_list) > 1:\n return np.mean(np.asarray(invo_list)), np.var(np.asarray(invo_list))\n return 'n/a', 'n/a'\n\n \n self.nodes_desc = dict(map(lambda k: (k, format_node(k)), self.topology.nodes()))\n\n\n def rank_nodes(self, order: str):\n self.get_io_egdes()\n if order == 'out':\n return {k: v for k, v in sorted(self.nodes_desc.items(), key=lambda x: x[1]['num_out'], reverse=True)}\n\n elif order == 'in':\n return {k: v for k, v in sorted(self.nodes_desc.items(), key=lambda x: x[1]['num_in'], reverse=True)}\n\n elif order == 'diff':\n return {k: v for k, v in sorted(self.nodes_desc.items(), key=lambda x: x[1]['num_out-in'], reverse=True)}\n\n elif order == 'invo-in':\n return {k: v for k, v in sorted(self.nodes_desc.items(), key=lambda x: x[1]['num_invo-in'], reverse=True)}\n\n elif order == 'invo-out':\n return {k: v for k, v in sorted(self.nodes_desc.items(), key=lambda x: x[1]['num_invo-out'], reverse=True)}\n\n else:\n raise Exception(\"order must be either 'in', 'out', or 'diff'\")\n\n # TODO \n # node and edge coloring using cm\n # default configs\n def draw(self, show, path='', edge_label=False, plot_opt={}):\n plot_opt_default = {\n 'node_color': (0.57,0.71,0.41,0.75),\n 'node_lable':{},\n 'edge_color': 'gray',\n }\n if 'ax' not in plot_opt:\n def_fig = plt.figure(figsize=[16,9],dpi=120)\n def_ax = def_fig.add_axes([0,0,1,1])\n def_ax.set_title(self.name)\n plot_opt_default['ax'] = def_ax\n\n plot_opt = plot_opt_default | plot_opt\n\n if self.loc_err:\n plot_opt['edge_color'] = self.set_edge_colors()\n\n plot_opt['node_color'], plot_opt['node_label'] = self.set_node_colors_and_label(plot_opt['node_color'])\n\n anomalous_edges = nx.get_edge_attributes(self.topology, 'anomalous')\n anomalous_edges = list(anomalous_edges.keys()) \n nx.draw_networkx_edges(self.topology, pos=nx.nx_pydot.graphviz_layout(self.topology, prog='dot'), edgelist=anomalous_edges, style='solid', ax=plot_opt['ax'], width=3, arrowsize=1, edge_color='red')\n\n el = nx.get_edge_attributes(self.topology, 'selected_features')\n for k, v in el.items():\n plot_opt['ax'].plot([], [], 'r_', label=f'{k}: {\", 
\".join(v)}')\n\n nx.draw_networkx(self.topology, pos=nx.nx_pydot.graphviz_layout(self.topology, prog='dot'), ax=plot_opt['ax'], node_size=300, font_size=7, font_color='#373737', width=2, arrowsize=10, edge_color=plot_opt['edge_color'], node_color=plot_opt['node_color'], labels=plot_opt['node_label'])\n\n if edge_label:\n el = nx.get_edge_attributes(self.topology, 'num_invo')\n bbox = dict(boxstyle='round', ec=(0.0, 1.0, 1.0, 0), fc=(0.0, 1.0, 1.0, 0))\n nx.draw_networkx_edge_labels(self.topology, pos=nx.nx_pydot.graphviz_layout(self.topology, prog='dot'), ax=plot_opt['ax'], edge_labels = el, font_size=8, verticalalignment='bottom', label_pos= 0.5, rotate=True, bbox=bbox)\n\n plot_opt['ax'].legend()\n\n if show:\n plt.show()\n if path:\n plt.savefig(path)\n plt.close()\n\n","repo_name":"hanapedia/rca_implemented","sub_path":"topological_analysis/topological_analysis.py","file_name":"topological_analysis.py","file_ext":"py","file_size_in_byte":9285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"27158260642","text":"import os\nimport random\nimport string\nfrom PIL import Image\nfrom flask import render_template, url_for, flash, redirect, request, abort\nfrom psrPlatform import app, db, bcrypt\nfrom psrPlatform.forms import RegistrationForm, LoginForm, RateForm, PicForm\nfrom psrPlatform.models import Users, Products, Ratings\nfrom psrPlatform import SVDpp_val, user_knn, user_pool\nfrom psrPlatform import get_similar_users, get_top_N_recommended_items\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom werkzeug.utils import secure_filename\nimport pickle\nimport numpy as np\nimport tensorflow as tf\nfrom keras.preprocessing import image\nfrom keras.models import load_model\nfrom keras.backend import clear_session, set_session\nfrom datetime import timedelta\n\n## load the deep learning model\nlabel2idx = pickle.load(open('./psrPlatform/img_model/img_label2idx.pkl', 'rb'))\nidx2label = {i:j for j, i in label2idx.items()}\nsess = tf.Session()\ngraph = tf.get_default_graph()\nset_session(sess)\nmodel = load_model(\"./psrPlatform/img_model/model_47000img_identification.h5\") \n\ndef gen_reviewerID(stringLength=14):\n lettersAndDigits = string.ascii_letters + string.digits\n return ''.join((random.choice(lettersAndDigits) for i in range(stringLength))).upper() \n\n# default page\n@app.route(\"/\")\n@app.route(\"/default\")\ndef default():\n return render_template('default.html')\n\n# home page\n@app.route(\"/home\")\n@login_required\ndef home():\n user = Users.query.filter_by(reviewerName=current_user.reviewerName).first_or_404()\n return render_template('home.html', username=user.reviewerName)\n\n\n@app.route(\"/games_by_p/\")\n@login_required\ndef games_by_p(price_choose):\n page = request.args.get('page', 1, type=int)\n price_choose_int = int(price_choose)\n games = Products.query.filter(Products.price < price_choose_int).paginate(page=page, per_page=5)\n return render_template('games_list.html', games=games, price_choose=price_choose)\n\n@app.route(\"/games_by_pic \", methods=['GET', 'POST'])\n@login_required\ndef games_by_pic():\n form = PicForm()\n if form.validate_on_submit():\n games_pre = pre_picture(form.picture.data)\n games_found = [Products.query.filter_by(asin = i).first() for i in games_pre]\n games_found = [i for i in games_found if i is not None]\n if games_found:\n return render_template('games_list_pic.html', games = games_found)\n else: \n flash('Whoops. Unable to find the image due to no record in database. Try another one.', 'danger')\n return render_template('search_by_pic.html', form=form, legend='Search by Picture')\n\n@app.route(\"/recommended_games\")\n@login_required\ndef recommended_games():\n user = Users.query.filter_by(reviewerName=current_user.reviewerName).first_or_404()\n userId = Users.query.filter_by(reviewerName=current_user.reviewerName).first().reviewerID\n print(userId)\n if userId not in user_pool:\n userId = random.sample(user_pool, 1)[0]\n recom_games = get_top_N_recommended_items(userId)\n games_found = [Products.query.filter_by(asin = i).first() for i in recom_games]\n games_found = [i for i in games_found if i is not None]\n if games_found :\n return render_template('recommended_games.html', games = games_found)\n else:\n flash('Unable to recommend. 
Rate more games would help the system learn!', 'danger')\n        return render_template('home.html', username=user.reviewerName)\n\n\n\n@app.route(\"/games/<game_id>\")\n@login_required\ndef game_detail(game_id):\n    game = Products.query.filter_by(asin = game_id).first_or_404()\n    page = request.args.get('page', 1, type=int)\n    rates = Ratings.query.filter_by(asin = game_id).order_by(Ratings.reviewTime.desc())\\\n        .paginate(page=page, per_page=5)\n    return render_template('game_detail.html', game = game, rates = rates)\n\n@app.route(\"/rate/new\", methods=['GET', 'POST'])\n@login_required\ndef create_post():\n    form = RateForm()\n    if form.validate_on_submit():\n        game = Products.query.filter_by(asin = form.game_id.data).first()\n        #print(game.asin)\n        rate = Ratings(product=game,\n                    reviewerName = current_user.reviewerName,\n                    reviewText=form.comment.data,\n                    summary = form.summary.data,\n                    rating = int(form.score.data),\n                    author=current_user)\n        db.session.add(rate)\n        db.session.commit()\n        flash('Your rate has been submitted!', 'success')\n        return redirect(url_for('home'))\n    return render_template('create_post.html', title='New Rate',\n                        form=form, legend='New Rate')\n\n@app.route(\"/user_rates\")\n@login_required\ndef user_rates():\n    page = request.args.get('page', 1, type=int)\n    rates = Ratings.query.filter_by(author=current_user)\\\n        .order_by(Ratings.reviewTime.desc())\\\n        .paginate(page=page, per_page=5)\n    username=current_user.reviewerName\n    return render_template('user_rates.html', rates=rates, username=username)\n\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n    if current_user.is_authenticated:\n        return redirect(url_for('home'))\n    form = RegistrationForm()\n    if form.validate_on_submit():\n        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n        while True:\n            new_reviewerID = gen_reviewerID()\n            if not Users.query.get(new_reviewerID):\n                break\n        user = Users(reviewerID = new_reviewerID, reviewerName=form.username.data, reviewerPW=hashed_password)\n        db.session.add(user)\n        db.session.commit()\n        flash('Your account has been created! You are now able to log in', 'success')\n        return redirect(url_for('login'))\n    return render_template('register.html', title='Register', form=form)\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n    if current_user.is_authenticated:\n        return redirect(url_for('home'))\n    form = LoginForm()\n    if form.validate_on_submit():\n        user = Users.query.filter_by(reviewerName=form.username.data).first()\n        if user and bcrypt.check_password_hash(user.reviewerPW, form.password.data):\n            login_user(user,duration = timedelta(minutes=5))\n            next_page = request.args.get('next')\n            return redirect(next_page) if next_page else redirect(url_for('home'))\n        else:\n            flash('Login Unsuccessful. 
Please check account and password', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('default'))\n\n\ndef save_picture(form_picture):\n picture_path = os.path.join(app.root_path, 'static/profile_pics', form_picture.filename)\n output_size = (150, 150)\n i = Image.open(form_picture)\n i.thumbnail(output_size)\n i.save(picture_path)\n\n return picture_path\n\n\ndef pre_picture(form_picture):\n img = Image.open(form_picture)\n img = img.resize((150, 150))\n x = np.expand_dims(image.img_to_array(img), axis=0)/255.0\n #print(x.shape)\n #print(model.summary())\n global sess\n global graph\n with graph.as_default():\n set_session(sess)\n pred = model.predict(x)\n #clear_session()\n # get top 3 product id with confidence\n pred = pred.flatten()\n top_3_idx = pred.argsort()[::-1][:3]\n top_3_items = {idx2label[i]:np.round(pred[i],10) for i in top_3_idx}\n print(top_3_items)\n top_3_list = sorted(top_3_items.keys(), key=lambda x: top_3_items[x], reverse = True) \n return top_3_list\n\n\n","repo_name":"aaron-DJUN/cloud_computing_proj","sub_path":"PSR_SaaS/psrPlatform/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
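The top-3 lookup in pre_picture relies on a standard NumPy idiom; a standalone sketch of it with made-up scores:

import numpy as np

pred = np.array([0.05, 0.60, 0.10, 0.25])   # fake softmax output
top_3_idx = pred.argsort()[::-1][:3]        # indices of the 3 largest scores
print(top_3_idx)                            # [1 3 2]
print(pred[top_3_idx])                      # [0.6  0.25 0.1 ]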
+{"seq_id":"38968896815","text":"# https://chaemi720.tistory.com/172\n\nfrom sys import stdin\n\nN, M = map(int, stdin.readline().split())\n\n# 수열, 수열에 들어간 요소 표시\ndef check(arr,visited):\n # 수열의 길이가 M인가?\n if len(arr) == M:\n print(*arr)\n return\n\n for i in range(1,N+1):\n # 수열에 없다면\n if visited[i] == 0:\n # 수열에 넣기\n visited[i] = 1\n check(arr+[i],visited)\n # 초기화\n visited[i] = 0\n\ncheck([],[0]*(N+1))\n ","repo_name":"chaemj97/Algorithm","sub_path":"2022년/6월/0618_백준_15649_N과M(1).py","file_name":"0618_백준_15649_N과M(1).py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"37771561681","text":"from geometry_msgs.msg import Pose, PoseStamped, Point, Quaternion\nfrom pr2_pick_main import handle_service_exceptions\nfrom std_msgs.msg import Header\nfrom pr2_pick_manipulation.srv import MoveArmIk, MoveArmIkRequest\nfrom visualization_msgs.msg import Marker\nimport moveit_commander\nimport outcomes\nimport rospy\nimport smach\nimport tf\nimport visualization as viz\nimport time\n\nclass DropOffItem(smach.State): \n \"\"\"Deposits the item into the order bin.\n \"\"\"\n name = 'DROP_OFF_ITEM'\n\n # The x,y coordinates the base should drive to for dropoffs in the order\n # bin frame\n DROPOFF_POS_BASE_X = -0.6040\n DROPOFF_POS_BASE_Y = 0.6604\n # The position the arm will move to before it lets go of the object\n DROPOFF_POS_ARM_X = 0.0872\n DROPOFF_POS_ARM_Y = -0.8277\n DROPOFF_POS_ARM_Z = 0.6577\n DROPOFF_QUAT_ARM_X = 0.0008\n DROPOFF_QUAT_ARM_Y = -0.7025\n DROPOFF_QUAT_ARM_Z = 0.0197\n DROPOFF_QUAT_ARM_W = -0.7114\n # The height the arm will start at before lowering into the bin to dropoff\n # object\n DROPOFF_POS_ARM_START_Z = 0.7477\n\n def __init__(self, **kwargs):\n smach.State.__init__(self,\n outcomes=[\n outcomes.DROP_OFF_ITEM_SUCCESS,\n outcomes.DROP_OFF_ITEM_FAILURE\n ],\n input_keys=['bin_id', 'bin_data', 'previous_item'],\n output_keys=['output_bin_data', 'previous_item']\n )\n self._tts = kwargs[\"tts\"]\n self._set_grippers = kwargs[\"set_grippers\"]\n self._drive_linear = kwargs[\"drive_linear\"]\n self._moveit_move_arm = kwargs[\"moveit_move_arm\"]\n self._move_arm_ik = kwargs[\"move_arm_ik\"]\n self._tuck_arms = kwargs[\"tuck_arms\"]\n self._markers = kwargs[\"markers\"]\n self._drive_to_pose = kwargs[\"drive_to_pose\"]\n self._tf_listener = kwargs[\"tf_listener\"]\n\n\n @handle_service_exceptions(outcomes.DROP_OFF_ITEM_FAILURE)\n def execute(self, userdata): \n\n # open gripper\n raw_input(\"Press enter to release item\")\n rospy.loginfo('Open gripper')\n self._set_grippers.wait_for_service()\n time.sleep(5)\n open_gripper_success = self._set_grippers(True, True, -1)\n rospy.loginfo(open_gripper_success)\n\n\n # get back to \"untucked\" position\n rospy.loginfo('Untucking right arm')\n self._tuck_arms.wait_for_service()\n retucked_success = self._tuck_arms(tuck_left=False, tuck_right=False)\n rospy.loginfo(retucked_success)\n\n return outcomes.DROP_OFF_ITEM_SUCCESS\n","repo_name":"hcrlab/push_pull","sub_path":"pr2_pick_main/scripts/states/DropOffItem.py","file_name":"DropOffItem.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74008832871","text":"import cv2\nimport numpy as np\nimport cv2.cv as cv\nimg = cv2.imread('image.png')\nimg = cv2.medianBlur(img,5)\n\ncontours,hierarchy = cv2.findContours(img, 1, 2)\n\ncnt = contours[0]\n(x,y),radius = cv2.minEnclosingCircle(cnt)\ncenter = (int(x),int(y))\nradius = int(radius)\nimg = cv2.circle(img,center,radius,(0,255,0),2)\n\ncv2.imshow('detected circles',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"techalien/xestos","sub_path":"Haar cascade method/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"40915983289","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nsys.path.append(r'D:/TreasureValley/vadose/WaterLevelProc')\nfrom WaterlevelTools import filterData, getWLalt, normscore\n\nVROOT = r'D:/TreasureValley/vadose'\n\n#---------------------------\n# Import nullset of TV wells, Data and site info\n# ---------------------------\nf = os.path.join(VROOT, 'data/groundwater/null/WellInfo_null.csv')\n#Read Wellsite\nWellsNull = pd.read_csv(f,\n header=0,\n infer_datetime_format=True, parse_dates=True,\n index_col='WellNumber')\nif os.path.isfile(os.path.join(VROOT,'data/groundwater/null/WellLevels_null.pkl')):\n DataNull = pd.read_pickle(os.path.join(VROOT,'data/null/WellLevels_null.pkl'))\n DataNull[['StatusName','MethodName','AgencyName']] = DataNull[['StatusName', 'MethodName', 'AgencyName']].astype(str)\n DataNull.index = DataNull.pop('MeasurementDate')\n #DataNull = DataNull.unstack( level = 0)\n #df = df.resample('D').mean()\n #df = df[df.notnull().any(axis=1)]\nelse:\n print('Pickle file does not exist. Create nullset pickle file to speed up reading')\n \n# ---------------------------------------------------------------------------\n# Define a dictionary of 'scenarios' or conditions for selecting wells\n# ------------------------------------------------------------------------\n\nscenarios = [{'description': 'Median1986_2018',\n 'minrec' : 5, \n 'date_start' : pd.datetime(1980,1,1),\n 'date_end' : pd.datetime(2018,1,1),\n 'data' : (),\n 'maxdepth' : 200},\n \n {'description' : 'WinterWY2016',\n 'minrec' : 1,\n 'date_start' : pd.datetime(2015,11,1),\n 'date_end' : pd.datetime(2016,4,1),\n 'data' : (),\n 'maxdepth' : 200 } ,\n \n {'description' : 'null',\n 'minrec' : 1,\n 'date_start' : pd.datetime(1910,1,1),\n 'date_end' : pd.datetime(2018,12,1),\n 'data' : (),\n 'maxdepth' : 1000}\n ]\n# Populate well data\nwldata = [filterData(s,DataNull, WellsNull) for i,s in enumerate(scenarios)]\nwldata = [getWLalt( df ) for df in wldata]\nwldata = [normscore(df,['DTWmed','ALTdtw'])[0] for df in wldata]\nfor i, df in enumerate(wldata):\n scenarios[i]['data'] = df\n#Attempt to populate with dictionary/list comprehension\n#data = [ {key: filterData(s,DataNull) for key,val in s.items() if key == 'data' } for s in scenarios]\n\n\n#-------------------------------------\n# Export some files\nimport pickle\n\n# Make directories for data \n[os.mkdir(os.path.join(VROOT,'data/groundwater',s['description'])) \nfor s in scenarios \nif not os.path.isdir(os.path.join(VROOT,'data/groundwater',s['description']))]\n\nfor s in scenarios:\n fdir = os.path.join(VROOT,r'data/groundwater',s['description'])\n with open( fdir + '\\\\' + s['description'] + '.pkl','wb') as handle:\n pickle.dump(s ,handle)\n\ngeoeas=False\nshapef = False\nif geoeas:\n dfout = wldata[0].filter(regex='IDTM|DTW|dtw')\n toGEOEAS(dfout.replace(np.nan,-999), r'D:/TreasureValley/vadose/data/groundwater/WL_all2.dat','Water level, 1980-present')\n \nelif shapef:\n# ---------- Export to shapefile\n from shapely.geometry import Point\n import geopandas\n # Name and directory creation\n SiteInfo = scenarios[0]['data']\n desc = scenarios[0]['description']\n f = 'TV_watertable_{}.shp'.format(desc)\n fdir = os.path.join(VROOT,'data',desc)\n if not os.path.isdir(fdir):\n os.mkdir(fdir)\n # Geospatial\n SiteInfo['geometry'] = SiteInfo.apply(lambda x: Point((float(x.XIDTM), float(x.YIDTM), 
float(x.ALTdtw))),axis=1)\n proj4str = '+proj=tmerc +lat_0=42 +lon_0=-114 +k=0.9996 +x_0=2500000 +y_0=1200000 +datum=nad83 +ellps=GRS80 +units=m +no_defs'\n SiteInfoGeo = geopandas.GeoDataFrame(SiteInfo,geometry='geometry',crs = proj4str)\n SiteInfoGeo.loc[:,~SiteInfoGeo.columns.str.contains('Date')].to_file(\n os.path.join(fdir,f),driver='ESRI Shapefile')\n\n\n#------Various Data Description queries\n# 753 Wells in study area\n# Wells without depth or opening data\nWellsNull.filter(regex='TotalDepth|Opening').isnull().all(axis=1).sum()\n# Wells deeper than 200 ft\n((WellsNull['TotalDepth'] > 200) | (WellsNull['OpeningMin'] > 200)).sum()","repo_name":"alemood/TreasureValley","sub_path":"vadose/WaterLevelProc/AnalyzeVadoseZone.py","file_name":"AnalyzeVadoseZone.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"28195158987","text":"\"\"\"Instruction type definitions.\"\"\"\n\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, ClassVar, Sequence, Type, TypeVar\n\nfrom k0s_dasm.flow import Forward as FlowForward\nfrom k0s_dasm.util import fmthex\n\nif TYPE_CHECKING:\n\tfrom k0s_dasm.base import Field, Flow, Operand, Program\n\n\n_T = TypeVar(\"_T\", bound=\"Instruction\")\n\n\n@dataclass\nclass Instruction:\n\t\"\"\"\n\tDefinition of a 78K/0S instruction mnemonic and its encoding.\n\n\tThere should be one definition (subclass) for each row in the\n\t\"instruction code list\" from the manual.\n\n\tInstruction words are in BIG ENDIAN for consistent reading with the list\n\tin the datasheet, but data words are generally in little endian.\n\t\"\"\"\n\n\tmnemonic: ClassVar[str] = NotImplemented\n\t\"\"\"\n\tClass constant: text representation of the instruction format.\n\n\tThis is in the abstract case, with the names of the operands instead\n\tof the actual values for an instance of the instruction.\n\t\"\"\"\n\n\tmatch: ClassVar[int] = NotImplemented\n\t\"\"\"\n\tClass constant: bits consumed by the instruction.\n\n\tProgram data must match exactly when masked with mmask.\n\t\"\"\"\n\n\tmmask: ClassVar[int] = NotImplemented\n\t\"\"\"Class constant: mask applied to match and data when finding instructions.\"\"\"\n\n\tbytecount: ClassVar[int] = NotImplemented\n\t\"\"\"Class constant: instruction byte count.\"\"\"\n\n\tfield_defs: ClassVar[Sequence[\"Field\"]] = tuple()\n\t\"\"\"Class constant: tuple of Field instances for instruction fields.\"\"\"\n\n\tflow: ClassVar[\"Flow\"] = FlowForward()\n\t\"\"\"Class constant: instruction flow type.\"\"\"\n\n\tformat: ClassVar[str] = NotImplemented\n\t\"\"\"\n\tClass constant: format string with entries for operands.\n\n\tThe format things (i.e. {0}, {1}) will be filled in with the string from\n\trendering that operand, with indices per ``field_defs``.\n\t\"\"\"\n\n\tword: int\n\t\"\"\"Raw instruction word (8-32 bits).\"\"\"\n\n\tpc: int\n\t\"\"\"Address of (the first byte of) this instruction.\"\"\"\n\n\tnext: Sequence[int]\n\t\"\"\"\n\tAddress(es) of the next instruction(s).\n\n\tIn general this is the next sequential instruction in the program. But, if\n\tit's a branch instruction, it will be something different. If it's a\n\tconditional branch, there would be multiple next addresses. If the next\n\taddress is calculated at runtime, then this field may be empty (requiring\n\tmanual intervention).\n\t\"\"\"\n\n\toperands: dict[\"Field\", \"Operand\"]\n\t\"\"\"\n\tOperand values for each defined Field.\n\n\tPreferred order is as per ``field_defs``.\n\t\"\"\"\n\n\tprogram: \"Program\"\n\t\"\"\"The containing Program.\"\"\"\n\n\tnotes: list[str] = field(default_factory=list)\n\t\"\"\"Notes or warnings from analysis.\"\"\"\n\n\t@classmethod\n\tdef load(cls: Type[_T], program: \"Program\", pc: int) -> _T | None:\n\t\t\"\"\"\n\t\tAttempt to match some program data to this instruction def.\n\n\t\tIf no start address is provided, the PC in the Program is used, and\n\t\tthen updated according to the actual word length. 
If a start address\n\t\tis provided, the Program's PC is ignored and not updated.\n\t\t\"\"\"\n\t\tpc_next = pc + cls.bytecount\n\t\tdata = program.flash[pc:pc_next]\n\n\t\tif len(data) < cls.bytecount:\n\t\t\treturn None\n\t\tword = int.from_bytes(data[: cls.bytecount], byteorder=\"big\", signed=False)\n\t\tif (word & cls.mmask) != (cls.match & cls.mmask):\n\t\t\treturn None\n\t\t# else, matched.\n\n\t\tfields: dict[Field, Operand] = {}\n\t\tout = cls(\n\t\t\tword=word,\n\t\t\tpc=pc,\n\t\t\tnext=tuple(),\n\t\t\toperands=fields,\n\t\t\tprogram=program,\n\t\t)\n\n\t\tfor fdef in cls.field_defs:\n\t\t\tfields[fdef] = fdef.from_inst_word(word, out)\n\t\tout.next = out.flow.next(out)\n\t\tif not out._check_fields():\n\t\t\treturn None\n\n\t\treturn out\n\n\t# noinspection PyMethodMayBeStatic\n\tdef _check_fields(self) -> bool:\n\t\t\"\"\"\n\t\tCheck if field values are allowed for this definition.\n\n\t\tCalled as part of the match/load process, if False, the match fails.\n\t\t\"\"\"\n\t\treturn True\n\n\t@staticmethod\n\tdef autoload(program: \"Program\", pc: int) -> \"Instruction\":\n\t\t\"\"\"Attempt to match some program data to any instruction subclass.\"\"\"\n\t\ttry:\n\t\t\t# defs live here\n\t\t\timport k0s_dasm.instr # noqa\n\t\texcept ImportError:\n\t\t\tpass\n\n\t\tresults: list[Instruction] = []\n\t\tfor cls in Instruction.__subclasses__():\n\t\t\tif cls.mnemonic is NotImplemented or cls.match is NotImplemented:\n\t\t\t\tcontinue # intermediate class\n\t\t\tresult = cls.load(program, pc)\n\t\t\tif result is not None:\n\t\t\t\tresults.append(result)\n\n\t\tdebug_data = program.flash[pc : pc + 4]\n\t\tif len(results) == 0:\n\t\t\traise ValueError(\n\t\t\t\tf\"Could not match instruction data: {fmthex(debug_data)} ...\"\n\t\t\t)\n\t\telif len(results) > 1:\n\t\t\tdebug = \"\\n\\t\".join([result.render() for result in results])\n\t\t\traise RuntimeError(\n\t\t\t\t\"Multiple matches for instruction data! \"\n\t\t\t\tf\"[ {fmthex(debug_data)} ... ] -> \\n\\t{debug}\"\n\t\t\t)\n\n\t\telse:\n\t\t\tresult = results[0]\n\t\t\treturn result\n\n\tdef render(self) -> str:\n\t\t\"\"\"Render instruction mnemonic with field values.\"\"\"\n\t\tren_fields: list[str] = []\n\t\tfor fdef in self.field_defs:\n\t\t\tren_fields.append(self.operands[fdef].render())\n\t\treturn self.format.format(*ren_fields)\n","repo_name":"pixelfelon/78k0s-dasm","sub_path":"k0s_dasm/ibase.py","file_name":"ibase.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
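The load/match mechanics above reduce to a mask-and-compare on the big-endian instruction word; a toy illustration of the idea (the opcode below is invented, not a real 78K/0S encoding):

# Hypothetical 1-byte instruction 0b00110rrr, where the low 3 bits pick a register.
MATCH = 0b00110000
MMASK = 0b11111000

def matches(word: int) -> bool:
    return (word & MMASK) == (MATCH & MMASK)

assert matches(0b00110101)       # opcode bits agree, register field = 5
assert not matches(0b01110101)   # opcode bits differ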
+{"seq_id":"41047053629","text":"import math\nfrom typing import Tuple, Union, Optional\nimport torch\nfrom torch import nn\nimport numpy as np\nfrom basicts.runners.base_runner import BaseRunner\nfrom basicts.utils.registry import SCALER_REGISTRY\nfrom basicts.utils.serialization import load_pkl\nfrom easytorch.utils.dist import master_only\n\n\"\"\"\nRunner for traffic datasets~(short-term forecasting datasets).\n- support curriculum learning.\n- metrics:\n - MAE\n - RMSE\n - MAPE\n- evluate at horizon 3, 6, 12, and overall.\n- users must have to implement the `forward` function. \n\"\"\"\n\nclass TrafficRunner(BaseRunner):\n \"\"\"runner for traffic datasets: metr-la, pems-bay, pems03, pems04, pems07, pems08.\n details:\n - initialize metrics: mae, mape, rmse\n - define model\n - build datasets & dataloader\n - self.iter_per_epoch\n - train/val iteration, test process.\n Args:\n BaseRunner (easytorch.easytorch.runner): base runner\n \"\"\"\n def __init__(self, cfg: dict):\n super().__init__(cfg)\n\n self.dataset_name = cfg['DATASET_NAME']\n self.null_val = cfg['TRAIN'].get('NULL_VAL', np.nan) # different datasets have different null_values. For example, 0.0 in traffic speed dataset, nan in traffic flow dataset.\n self.dataset_type = cfg['DATASET_TYPE']\n self.forward_features = cfg['MODEL'].get('FROWARD_FEATURES', None)\n self.target_features = cfg['MODEL'].get('TARGET_FEATURES', None)\n\n # read scaler for re-normalization\n self.scaler = load_pkl(\"datasets/\" + self.dataset_name + \"/scaler.pkl\")\n # define loss\n self.loss = cfg['TRAIN']['LOSS']\n # define metric\n self.metrics = cfg['METRICS'] \n # curriculum learning for output. Note that this is different from the CL in Seq2Seq archs.\n self.cl_param = cfg.TRAIN.get('CL', None)\n if self.cl_param is not None:\n self.warm_up_epochs = cfg.TRAIN.CL.get('WARM_EPOCHS', 0)\n self.cl_epochs = cfg.TRAIN.CL.get('CL_EPOCHS')\n self.prediction_length = cfg.TRAIN.CL.get('PREDICTION_LENGTH')\n\n def init_training(self, cfg: dict):\n \"\"\"Initialize training.\n\n Including loss, training meters, etc.\n\n Args:\n cfg (dict): config\n \"\"\"\n super().init_training(cfg)\n for key, value in self.metrics.items():\n self.register_epoch_meter(\"train_\"+key, 'train', '{:.4f}')\n\n def init_validation(self, cfg: dict):\n \"\"\"Initialize validation.\n\n Including validation meters, etc.\n\n Args:\n cfg (dict): config\n \"\"\"\n super().init_validation(cfg)\n for key, value in self.metrics.items():\n self.register_epoch_meter(\"val_\"+key, 'val', '{:.4f}')\n\n def init_test(self, cfg: dict):\n \"\"\"Initialize test.\n\n Including test meters, etc.\n\n Args:\n cfg (dict): config\n \"\"\"\n\n super().init_test(cfg)\n for key, value in self.metrics.items():\n self.register_epoch_meter(\"test_\"+key, 'test', '{:.4f}')\n\n @staticmethod\n def define_model(cfg: dict) -> nn.Module:\n \"\"\"Define model.\n\n If you have multiple models, insert the name and class into the dict below,\n and select it through ```config```.\n\n Args:\n cfg (dict): config\n\n Returns:\n model (nn.Module)\n \"\"\"\n return cfg['MODEL']['ARCH'](**cfg.MODEL.PARAM)\n\n def build_train_dataset(self, cfg: dict):\n \"\"\"Build MNIST train dataset\n\n Args:\n cfg (dict): config\n\n Returns:\n train dataset (Dataset)\n \"\"\"\n raw_file_path = cfg[\"TRAIN\"][\"DATA\"][\"DIR\"] + \"/data.pkl\"\n index_file_path = cfg[\"TRAIN\"][\"DATA\"][\"DIR\"] + \"/index.pkl\"\n batch_size = cfg['TRAIN']['DATA']['BATCH_SIZE']\n dataset = cfg['DATASET_CLS'](raw_file_path, index_file_path, 
mode='train')\n print(\"train len: {0}\".format(len(dataset)))\n \n self.iter_per_epoch = math.ceil(len(dataset) / batch_size)\n \n return dataset\n\n @staticmethod\n def build_val_dataset(cfg: dict):\n \"\"\"Build MNIST val dataset\n\n Args:\n cfg (dict): config\n\n Returns:\n train dataset (Dataset)\n \"\"\"\n raw_file_path = cfg[\"VAL\"][\"DATA\"][\"DIR\"] + \"/data.pkl\"\n index_file_path = cfg[\"VAL\"][\"DATA\"][\"DIR\"] + \"/index.pkl\"\n dataset = cfg['DATASET_CLS'](raw_file_path, index_file_path, mode='valid')\n print(\"val len: {0}\".format(len(dataset)))\n return dataset\n\n @staticmethod\n def build_test_dataset(cfg: dict):\n \"\"\"Build MNIST val dataset\n\n Args:\n cfg (dict): config\n\n Returns:\n train dataset (Dataset)\n \"\"\"\n raw_file_path = cfg[\"TEST\"][\"DATA\"][\"DIR\"] + \"/data.pkl\"\n index_file_path = cfg[\"TEST\"][\"DATA\"][\"DIR\"] + \"/index.pkl\"\n dataset = cfg['DATASET_CLS'](raw_file_path, index_file_path, mode='test')\n print(\"test len: {0}\".format(len(dataset)))\n return dataset\n\n def curriculum_learning(self, epoch: int = None) -> int:\n \"\"\"calculate task level in curriculum learning.\n\n Args:\n epoch (int, optional): current epoch if in training process, else None. Defaults to None.\n\n Returns:\n int: task level\n \"\"\"\n if epoch is None:\n return self.prediction_length\n epoch -= 1\n # generate curriculum length\n if epoch < self.warm_up_epochs:\n # still warm up\n cl_length = self.prediction_length\n else:\n _ = (epoch - self.warm_up_epochs) // self.cl_epochs + 1\n cl_length = min(_, self.prediction_length)\n return cl_length\n\n def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple:\n \"\"\"feed forward process for train, val, and test. Note that the outputs are NOT re-scaled.\n\n Args:\n data (tuple): data (future data, history data). [B, L, N, C] for each of them\n epoch (int, optional): epoch number. Defaults to None.\n iter_num (int, optional): iteration number. Defaults to None.\n train (bool, optional): if in the training process. Defaults to True.\n\n Returns:\n tuple: (prediction, real_value). 
[B, L, N, C] for each of them.\n \"\"\"\n raise NotImplementedError()\n\n def train_iters(self, data: Union[torch.Tensor, Tuple], epoch: int, iter_index: int) -> torch.Tensor:\n \"\"\"Training details.\n\n Args:\n data (Union[torch.Tensor, Tuple]): Data provided by DataLoader\n epoch (int): current epoch.\n iter_index (int): current iter.\n\n Returns:\n loss (torch.Tensor)\n \"\"\"\n iter_num = (epoch-1) * self.iter_per_epoch + iter_index\n prediction, real_value = self.forward(data=data, epoch=epoch, iter_num=iter_num, train=True)\n # re-scale data\n prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args'])\n real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args'])\n # loss\n if self.cl_param:\n cl_length = self.curriculum_learning(epoch=epoch)\n loss = self.loss(prediction[:, :cl_length, :, :], real_value[:, :cl_length, :, :], null_val=self.null_val)\n else:\n loss = self.loss(prediction, real_value, null_val=self.null_val)\n # metrics\n for metric_name, metric_func in self.metrics.items():\n metric_item = metric_func(prediction, real_value, null_val=self.null_val)\n self.update_epoch_meter('train_'+metric_name, metric_item.item())\n return loss\n\n def val_iters(self, data: Union[torch.Tensor, Tuple], train_epoch: int, iter_index: int):\n \"\"\"Validation details.\n\n Args:\n data (Union[torch.Tensor, Tuple]): Data provided by DataLoader\n train_epoch (int): current epoch if in training process. Else None.\n iter_index (int): current iter.\n \"\"\"\n prediction, real_value = self.forward(data=data, epoch=train_epoch, iter_num=None, train=False)\n # re-scale data\n prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args'])\n real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args'])\n # loss\n mae = self.loss(prediction, real_value, null_val=self.null_val)\n # metrics\n for metric_name, metric_func in self.metrics.items():\n metric_item = metric_func(prediction, real_value, null_val=self.null_val)\n self.update_epoch_meter('val_'+metric_name, metric_item.item())\n\n @torch.no_grad()\n @master_only\n def test(self, train_epoch: int = None):\n \"\"\"test model.\n\n Args:\n train_epoch (int, optional): current epoch if in training process.\n \"\"\"\n # test loop\n prediction = []\n real_value = []\n for iter_index, data in enumerate(self.test_data_loader):\n preds, testy = self.forward(data, epoch=train_epoch, iter_num=None, train=False)\n prediction.append(preds)\n real_value.append(testy)\n prediction = torch.cat(prediction,dim=0)\n real_value = torch.cat(real_value, dim=0)\n # re-scale data\n prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args'])\n real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args'])\n # summarize the results.\n ## test performance of different horizon\n for i in range(12):\n # For horizon i, only calculate the metrics **at that time** slice here.\n pred = prediction[:,i,:,:]\n real = real_value[:,i,:,:]\n # metrics\n metric_results = {}\n for metric_name, metric_func in self.metrics.items():\n metric_item = metric_func(pred, real, null_val=self.null_val)\n metric_results[metric_name] = metric_item.item()\n log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test RMSE: {:.4f}, Test MAPE: {:.4f}'\n log = log.format(i+1, metric_results['MAE'], metric_results['RMSE'], metric_results['MAPE'])\n self.logger.info(log)\n ## test performance overall\n for 
metric_name, metric_func in self.metrics.items():\n metric_item = metric_func(prediction, real_value, null_val=self.null_val)\n self.update_epoch_meter('test_'+metric_name, metric_item.item())\n metric_results[metric_name] = metric_item.item()\n\n @master_only\n def on_validating_end(self, train_epoch: Optional[int]):\n \"\"\"Callback at the end of validating.\n\n Args:\n train_epoch (Optional[int]): current epoch if in training process.\n \"\"\"\n if train_epoch is not None:\n self.save_best_model(train_epoch, 'val_MAE', greater_best=False)\n","repo_name":"zhoujiajuly/copy-basicTS","sub_path":"basicts/runners/base_traffic_runner.py","file_name":"base_traffic_runner.py","file_ext":"py","file_size_in_byte":11125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
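The curriculum schedule in curriculum_learning is easier to see with concrete numbers; a standalone sketch with illustrative hyper-parameters (warm-up 5 epochs, one extra step every 3 epochs, horizon 12):

def cl_length(epoch, warm_up_epochs=5, cl_epochs=3, prediction_length=12):
    epoch -= 1
    if epoch < warm_up_epochs:
        return prediction_length   # warm-up trains on the full horizon
    return min((epoch - warm_up_epochs) // cl_epochs + 1, prediction_length)

print([cl_length(e) for e in range(1, 16)])
# [12, 12, 12, 12, 12, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4]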
+{"seq_id":"42739267975","text":"import urllib3\nfrom utils.GenUtils import read_lines, write_lines\nimport time\nfrom tqdm import tqdm\n\nlink_file = '/Users/sravan/Spotify.txt'\noutfile = '/Users/sravan/SpotifyLinkData.txt'\nsong_links = read_lines(link_file)\n\nhttp = urllib3.PoolManager()\nfin_lines = list()\n\nfor each_link in tqdm(song_links):\n try:\n r = http.request('GET', each_link)\n data = r.data\n data_s = data.decode()\n fin_lines.append(data_s)\n except Exception as e:\n print(each_link)\n print(e.__str__())\n print('\\n====\\n\\n=====\\n')\n\n time.sleep(1)\n\nwrite_lines(fin_lines, outfile)\n","repo_name":"gsravank/ds_algo","sub_path":"problems/adhoc/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16285202638","text":"import datetime\nimport json\nimport os\nimport time\n\nimport cv2\nimport numpy as np\nimport paho.mqtt.client as mqtt\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nGREEN_START = 65\nGREEN_END = 70\n\nRED_START = 175\nRED_END = 180\n\n\ndef resize_small(image):\n return cv2.resize(\n image, dsize=(0, 0), fx=0.2, fy=0.2, interpolation=cv2.INTER_LINEAR\n )\n\n\ndef extract_rough_led_image(image, h_start, h_end):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n h, _, _ = cv2.split(hsv)\n\n h = cv2.inRange(h, h_start, h_end)\n\n masked_image = cv2.bitwise_and(hsv, hsv, mask=h)\n\n return cv2.cvtColor(masked_image, cv2.COLOR_BGR2GRAY)\n\n\ndef clean_image(image):\n kernel = np.ones((20, 20), np.uint8)\n\n return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)\n\n\ndef find_led_lamps(image):\n _, bin_img = cv2.threshold(image, 20, 255, cv2.THRESH_BINARY)\n contours, hierarchy = cv2.findContours(\n bin_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n )\n\n return contours, hierarchy, bin_img\n\n\ndef post_sensing_value(sensing_value):\n body = json.dumps(\n {\n \"sensor_id\": \"boiler001\",\n \"sensing_value\": sensing_value,\n \"timestamp\": int(time.mktime(datetime.datetime.now().timetuple())),\n }\n )\n\n client = mqtt.Client()\n client.username_pw_set(os.getenv(\"MQTT_USER\"), password=os.getenv(\"MQTT_PASSWORD\"))\n client.tls_set(tls_version=mqtt.ssl.PROTOCOL_TLSv1_2, ciphers=None)\n client.tls_insecure_set(True)\n client.connect(os.getenv(\"MQTT_BROKER_HOST\"), 8883)\n client.publish(os.getenv(\"MQTT_TOPIC\"), body)\n client.disconnect()\n\n\ndef detect_light(image, h_start, h_end):\n rough_img = extract_rough_led_image(image, h_start, h_end)\n clean_img = clean_image(rough_img)\n contours, _, _ = find_led_lamps(clean_img)\n\n return contours\n\n\ndef pick_led(cap):\n while True:\n _, frame = cap.read()\n\n green_contours = detect_light(frame, GREEN_START, GREEN_END)\n red_contours = detect_light(frame, RED_START, RED_END)\n\n if len(green_contours) > 0:\n print(\"Green is detected.\")\n post_sensing_value(\"green\")\n elif len(red_contours) > 0:\n print(\"Red is detected.\")\n post_sensing_value(\"red\")\n elif len(green_contours) <= 0 and len(red_contours) <= 0:\n print(\"Nothing is detected.\")\n else:\n print(\"Both is detected.\")\n\n time.sleep(30)\n\n\ndef testing_sample_img(frame):\n green_contours = detect_light(frame, GREEN_START, GREEN_END)\n red_contours = detect_light(frame, RED_START, RED_END)\n\n if len(green_contours) > 0:\n print(\"Green is detected.\")\n post_sensing_value(\"green\")\n elif len(red_contours) > 0:\n print(\"Red is detected.\")\n post_sensing_value(\"red\")\n elif len(green_contours) <= 0 and len(red_contours) <= 0:\n print(\"Nothing is detected.\")\n else:\n print(\"Both are detected.\")\n\n\ndef main():\n cap = cv2.VideoCapture(0)\n time.sleep(3)\n\n pick_led(cap)\n\n'''\nMEMO: \nYou can try LED detection with sample images (green and red LED).\nPlease uncomment the following main function if you want to try it.\n'''\n# def main():\n# frame = cv2.imread(\"./img/green_led.jpg\", cv2.IMREAD_COLOR)\n# testing_sample_img(frame)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fujitake/smart-building-quick-start-kit","sub_path":"devices/analog-meter-readers/led-meter/led_status_observer.py","file_name":"led_status_observer.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12502731858","text":"from django.http import JsonResponse\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\nfrom rest_framework_simplejwt.views import TokenObtainPairView\n\nfrom .serializers import NoteSerializer\nfrom base.models import Note\n\nimport subprocess\nfrom dotenv import load_dotenv, dotenv_values\n\nclass MyTokenObtainPairSerializer(TokenObtainPairSerializer):\n @classmethod\n def get_token(cls, user):\n token = super().get_token(user)\n\n # Add custom claims\n token['username'] = user.username # encrypted\n\n return token\n\n\nclass MyTokenObtainPairView(TokenObtainPairView):\n serializer_class = MyTokenObtainPairSerializer\n\n@api_view(['GET'])\ndef getRoutes(request):\n routes = [\n '/api/token',\n '/api/token/refresh',\n '/api/query'\n ]\n return Response(routes)\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef getNotes(request):\n user = request.user\n notes = user.note_set.all()\n serializer = NoteSerializer(notes, many=True)\n return Response(serializer.data)\n\ndef index(request):\n cmd = \"/home/website/backend/base/api/token_erc_20 \" + request.GET.get('cmd')\n env = dotenv_values(\"/home/website/backend/base/api/env/.env.org1.minter\")\n print(cmd.split(\" \"))\n res = subprocess.run(cmd.split(\" \"), env=env, capture_output=True)\n output = res.stdout.decode().split(\"***\")\n transaction, result = output[0].strip(\"-> \").rsplit(\".\", 1)[0], \"\"\n if (res.stderr.decode() == \"\"):\n result = output[1].strip().replace(\"Result: \", \"\")\n return JsonResponse({\"action\": transaction.split(\" \")[0], \"transaction\": transaction.split(\" \")[2], \"result\": result, 'error_msg': res.stderr.decode()})","repo_name":"daironghan/django-react-auth","sub_path":"backend/base/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20733284504","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport math\nfrom scipy import signal\n\n\nspacing=0.001\ndef grid(min, max, gridpoints):\n grid = np.linspace(min, max, gridpoints, dtype=np.complex_)\n return grid\n\ndef wavefunc(grid, func, *args):\n func_vec = np.vectorize(func)\n return func_vec(grid, *args)+0j\n\ndef potential(grid, func):\n func_vec = np.vectorize(func)\n return func_vec(grid)+0j\n\ndef timegrid(max, spacing):\n timegrid = np.arange(0, max, spacing)\n return timegrid\n\ndef solver(psi_0, V):\n psi_0_l = np.roll(psi_0,1)\n psi_0_u = np.roll(psi_0,-1)\n psi_0_l[0]=0+0j\n psi_0_u[-1]=0+0j\n\n b=spacing\n psi_t_x = psi_0 + (-1j*b/h_bar)*(((-h_bar**2/(2*m*a**2))*(psi_0_u-2*psi_0+psi_0_l))+np.multiply(psi_0,V))\n return psi_t_x/np.sqrt((np.sum(np.multiply(np.conj(psi_t_x),psi_t_x))))/2\n\n\nmax_time = 100\nmin_grid = 0\nmax_grid = 1000\ngridpoints = 500\nh_bar=1\nm=1\na = 2\ndef gaussian(x, mu, sig):\n return 1./(math.sqrt(2.*math.pi)*sig)*np.exp(-np.power((x - mu)/sig, 2.)/2)\n\ndef V(x):\n return (0.001*(x-500))\n\nx = grid(min_grid, max_grid, gridpoints)\npsi_0 = signal.gaussian(500, std=10)+0j\n\n\n\nC = potential(x, V)\n\nprint(C.dtype)\nprint(psi_0.dtype)\nprint(x.dtype)\ntimegrid = timegrid(max_time, spacing)\nfor i in range(1000000):\n psi_0 = solver(psi_0, C)\n\n\n\n\n\n\nprint(psi_0.dtype)\nplt.plot(x, np.power(np.absolute(psi_0),2))\n#plt.plot(x, C)\nplt.show()\n","repo_name":"MZauchner/SchroedingerSolver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18828963480","text":"from SVM.SVC import svc\nfrom numpy import mat\n\n# 三分类\n# 分类方法:有向无环图(DirectedAcyclic Graph)\n\ndataMat = [] # 训练集\ntestMat = [] # 测试集\nfile = open('D:\\\\PycharmProjects\\\\srpProject\\\\data\\\\iris')\ncounter = 0\n\n# 统一选取前40个作为训练集,后10个作为测试集\n\n# 读取训练集\nfor line in file.readlines():\n lineArr = line.strip().split(' ')\n dataMat.append([float(lineArr[1]), float(lineArr[2]), float(lineArr[3]), float(lineArr[4])])\n\n# 读取测试集\nfre = open('D:\\\\PycharmProjects\\\\srpProject\\\\data\\\\iris_test')\n\nfor line in fre.readlines():\n data = line.strip().split(' ')\n testMat.append([float(data[1]), float(data[2]), float(data[3]), float(data[4])])\n\n# 训练 (n-1)*n/2 = 3个分类器\n\n# 配置参数\nC = 0.8\ntol = 0.01\nmaxIter = 30\nkTup = ['rbf', 0.5]\n\n# setosa与versicolor\nsvm_sve = svc(dataMat[0:80], [-1] * 40 + [1] * 40, C, tol, maxIter, kTup)\n\n# setosa与virginica\nsvm_svi = svc(dataMat[0:40] + dataMat[81:120], [-1] * 40 + [1] * 40, C, tol, maxIter, kTup)\n\n# versicolor与virginica\nsvm_vevi = svc(dataMat[41:120], [-1] * 40 + [1] * 40, C, tol, maxIter, kTup)\n\nresult = 0\ni = 0\n\nfor dataArr in testMat:\n\n # 回答 setosa还是versicolor\n\n pri1 = svm_sve.predict(dataArr)\n\n if pri1 < 0: # 认为不是versicolor\n\n print(\"predict not versicolor \", end=' ')\n\n # 回答setosa还是virginica\n print(i, end=': ')\n print(\"predict:\", end=\"\")\n\n pri2 = svm_svi.predict(dataArr)\n\n if pri2 < 0: # 认为是setosa\n print(\"setosa\", end='')\n if 0 <= i < 10:\n print(\",prediction RIGHT\")\n result += 1\n else:\n print(\",prediction WRONG\")\n elif pri2 > 0: # 认为是virginica\n print(\"virginica\", end=\"\")\n if 20 <= i < 30:\n print(\",prediction RIGHT\")\n result += 1\n else:\n print(\",prediction WRONG\")\n else:\n print(\"Error!\")\n\n elif pri1 > 0: # 认为不是setosa\n\n print(\"predict not setosa \", end=' ')\n\n # 回答versicolor还是virginica\n\n pri2 = svm_vevi.predict(dataArr)\n\n print(i, end=': ')\n print(\"predict:\", end=\"\")\n\n if pri2 < 0: # 认为是versicolor\n print(\"versicolor\", end=\"\")\n if 10 <= i < 20:\n print(\",prediction RIGHT\")\n result += 1\n else:\n print(\",prediction WRONG\")\n elif pri2 > 0: # 认为是virginica\n print(\"virginica\", end=\"\")\n if 20 <= i < 30:\n print(\",prediction RIGHT\")\n result += 1\n else:\n print(\",prediction WRONG\")\n else:\n print(\"Error!\")\n\n else:\n print(\"Error!\")\n i += 1\n\nprint()\nprint(result / 30)\n","repo_name":"HanhengHe/srpProject","sub_path":"SVM/test/2019-8-31/trible_test.py","file_name":"trible_test.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"21483173029","text":"class Car:\n def __init__(self, size, brand, seats):\n self.color = \"white\"\n self.size = size\n self.brand = brand\n self.seats = seats\n def paint(self):\n print(\"What color do you want to print the car?\")\n ans = input()\n self.color = ans\ncar1 = Car(\"medium\", \"toyota\", 4)\ncar1.paint()\nprint(car1.color)\n","repo_name":"notusknot/python-programs","sub_path":"objectoriented.py","file_name":"objectoriented.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"6125156116","text":"import csv\nPremBig6 = [\"Manchester United\", \"Chelsea\", \"Tottenham Hotspur\", \"Manchester City\", \"Arsenal\", \"Liverpool\"]\nChampionsLeagueClub = [\"FC Barcelona\", \"Manchester United\", \"Chelsea\", \"Tottenham Hotspur\", \"Manchester City\", \"Arsenal\", \"Liverpool\", \"Juvents\", \"Paris Saint-Germain\",\n\"Real Madrid\", \"Napoli\", \"Milan\", \"Lazio\", \"Inter\", \"Roma\", \"FC Porto\", \"Valencia CF\", \"FC Bayern München\"]\nGoodValueClubs = [\"Ajax\", \"Stade Rennais FC\", \"LOSC Lille\"]\nwith open('playerdata.csv', encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n potenitals = []\n for row in csv_reader:\n if line_count == 0:\n line_count += 1\n elif line_count < 50000:\n if (row[9] in GoodValueClubs and int(row[3]) < 24 and int(row[3]) > 19):\n print(f'\\t{row[2]} is {row[3]} years old, and plays for {row[9]}.')\n potenitals.append(row[2])\n line_count += 1\n print(f'Processed {line_count} lines.')\n print(len(potenitals))","repo_name":"Reikon95/FootballStats2019","sub_path":"CSVReader.py","file_name":"CSVReader.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"37631627791","text":"from torch.utils.data import Dataset, DataLoader\nfrom torchvision.datasets import CIFAR10\nimport torchvision.transforms as transforms\n\n\ndef load_data(args):\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n train_dataset = CIFAR10('./data', train=True, transform=train_transform, download=True)\n\n train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)\n\n test_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n test_dataset = CIFAR10('./data', train=False, transform=test_transform, download=True)\n\n test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)\n\n return train_loader, test_loader","repo_name":"Luhuanz/pytorch_project","sub_path":"transformer/BottleneckTransformers/BottleneckTransformers-main/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":303,"dataset":"github-code","pt":"72"}
+{"seq_id":"22170153295","text":"import gym\nfrom gym import spaces\nimport numpy as np\n\nclass GridworldContNormalEnv(gym.Env):\n\n\tdef __init__(self, mean=[0,0,0,0], var=[1,1,1,1]):\n\n\t\tself.DIM = 5\n\t\tself.MAX_SPEED = 1\n\t\tself.END_DISTANCE = 0.75\n\n\t\tself.max_action = np.array([self.MAX_SPEED,self.MAX_SPEED])\n\t\tself.max_position = np.array([self.DIM/2,self.DIM/2,self.DIM/2,self.DIM/2])\n\n\t\tself.observation_space = spaces.Box(-self.max_position, self.max_position, dtype=np.float32)\n\t\tself.action_space = spaces.Box(-self.max_action, self.max_action, dtype=np.float32)\n\n\t\tself.mean = mean\n\t\tself.var = var\n\n\tdef dist(self,p1,p2):\n\t\treturn np.linalg.norm(p1-p2)\n\n\n\tdef check_end(self,pos,dest):\n\t\tif self.dist(pos,dest) <= self.END_DISTANCE:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef reset(self):\n\t\t\n\t\tx,y = np.random.normal(loc=self.mean[0:2],scale=np.sqrt(self.var[0:2]))\n\t\t#if abs(x)>self.DIM/2 or abs(y)>self.DIM/2:\n\t\t#\tx = np.random.uniform(-2.5,2.5)\n\t\t#\ty = np.random.uniform(-2.5,2.5)\n\n\t\txg,yg = np.random.normal(loc=self.mean[2:4],scale=np.sqrt(self.var[2:4]))\n\t\t#if abs(xg)>self.DIM/2 or abs(yg)>self.DIM/2:\n\t\t#\txg = np.random.uniform(-2.5,2.5)\n\t\t#\tyg = np.random.uniform(-2.5,2.5)\n\n\t\tpos = np.array([x,y],dtype=np.float32)\n\t\tdest = np.array([xg,yg],dtype=np.float32)\n\n\t\twhile self.check_end(pos,dest) or abs(x)>self.DIM/2 or abs(y)>self.DIM/2 or abs(xg)>self.DIM/2 or abs(yg)>self.DIM/2:\n\t\t\tx,y = np.random.normal(loc=self.mean[0:2],scale=np.sqrt(self.var[0:2]))\n\t\t\t#if abs(x)>self.DIM/2 or abs(y)>self.DIM/2:\n\t\t\t#\tx = np.random.uniform(-2.5,2.5)\n\t\t\t#\ty = np.random.uniform(-2.5,2.5)\n\n\t\t\txg,yg = np.random.normal(loc=self.mean[2:4],scale=np.sqrt(self.var[2:4]))\n\t\t\t#if abs(xg)>self.DIM/2 or abs(yg)>self.DIM/2:\n\t\t\t#\txg = np.random.uniform(-2.5,2.5)\n\t\t\t#\tyg = np.random.uniform(-2.5,2.5)\n\n\t\t\tpos = np.array([x,y],dtype=np.float32)\n\t\t\tdest = np.array([xg,yg],dtype=np.float32)\n\n\t\tself.state = np.array([x,y,xg,yg],dtype=np.float32)\n\t\treturn self.state\n\t\n\n\tdef step(self, action):\n\n\t\tassert action.shape == self.action_space.shape\n\n\t\tx,y,xg,yg = self.state\n\t\tpos = np.array([x,y],dtype=np.float32)\n\t\tdest = np.array([xg,yg],dtype=np.float32)\n\t\t\n\t\t# clip action to max_speed\n\t\t#action_norm = np.linalg.norm(action)\n\t\t#action = action if action_norm<=self.MAX_SPEED else action*(self.MAX_SPEED/action_norm)\n\n\t\tdx = action[0]\n\t\tdy = action[1]\n\n\t\tif np.abs(dx) > self.MAX_SPEED:\n\t\t\tdx = dx/np.abs(dx)\n\t\tif np.abs(dy) > self.MAX_SPEED:\n\t\t\tdy = dy/np.abs(dy)\n\n\t\tx += dx\n\t\ty += dy\n\n\t\tnewstate = np.array([x,y,xg,yg],dtype=np.float32)\n\t\tnp.clip(newstate,-self.max_position,self.max_position,newstate)\n\t\tx,y,xg,yg = newstate\n\t\tself.state = newstate\n\n\t\tpos = np.array([x,y],dtype=np.float32)\n\t\tdone = self.check_end(pos,dest)\n\t\tif done:\n\t\t\treward = 0\n\t\telse:\n\t\t\treward = -1\n\t\t\n\t\treturn self.state, reward, done","repo_name":"nondecidibile/cmdp","sub_path":"gym/envs/toy_text/gridworld_cont_normal.py","file_name":"gridworld_cont_normal.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"27459906670","text":"a = input(\"Enter the name of the file with extension: \")\n\nlines=words=chars=0\n\nwith open(a, 'r') as f:\n\tfor line in f:\n\t\tword = line.split(' ')\n\t\tlines+=1\n\t\twords+=len(word)\n\t\tchars+=len(line)\n\nprint(\"Number of Lines = %d\\nNumber of Words = %d\\nNumber of Characters = %d\" % (lines, words, chars))\n","repo_name":"nirmalnishant645/Python-Programming","sub_path":"Practice-Problems/File-Handling/FileStat.py","file_name":"FileStat.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"34744009533","text":"def readfile():\n infile = open('WorldSeries.txt','r')\n timeswon = dict()\n teamwon = dict()\n year = 1903\n team = infile.readline()\n dontskip = True\n while team != '':\n team = team.rstrip('\\n')\n if team in timeswon:\n timeswon[team] += 1\n elif team.startswith('World'):\n dontskip = False\n else:\n timeswon[team] = 1\n if dontskip:\n teamwon[year] = team\n year += 1\n team = infile.readline()\n dontskip = True\n return teamwon,timeswon\ndef main():\n teamwon, timeswon = readfile()\n selection = int(input('Choose a year between 1903 and 2009: '))\n while selection == 1904 or selection == 1994:\n print('There was no world cup in',selection)\n selection = int(input('Choose another year:'))\n print(teamwon[selection],'won the World Cup in',selection)\n print('In total they won,',timeswon[teamwon[selection]],'times.')\nmain()","repo_name":"legendbabs/StartingOutWithPython","sub_path":"StartOutWithPython/Chapter09/ProgrammingExercises/world_series2.py","file_name":"world_series2.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"14831618279","text":"from typing import Any, Dict, Optional\n\nimport torch\nimport torch.distributed as dist\n\nfrom .storage import (\n    StorageReader,\n)\nfrom .planner import LoadPlanner\nfrom .default_planner import DefaultLoadPlanner\n\nfrom .utils import _DistWrapper\n\n__all__ = [\"load_state_dict\"]\n\n\ndef load_state_dict(\n    state_dict: Dict[str, Any],\n    storage_reader: StorageReader,\n    process_group: Optional[dist.ProcessGroup] = None,\n    coordinator_rank: int = 0,\n    no_dist: bool = False,\n    planner: Optional[LoadPlanner] = None,\n) -> None:\n    \"\"\"\n    Loads a distributed ``state_dict`` in SPMD style.\n\n    Each rank will try to read the least amount of data necessary\n    to fulfill the requested `state_dict`. When loading :class:`ShardedTensor`\n    instances, each rank only reads data for their local shards.\n\n    .. warning::\n        All tensors in ``state_dict`` must be allocated on their\n        destination device *prior to* calling this function.\n\n        All non-tensor data is loaded using `torch.load()` and modified in place\n        on state_dict.\n\n    .. warning::\n        Users must call `load_state_dict` on the root module to ensure load\n        post-processing and non-tensor data properly propagates.\n\n    .. note:\n        This function can be used for local inference and load a checkpoint\n        produced by ``save_state_dict`` without having a process group initialized\n        by passing ``no_dist=True`` and by using Tensors instead of ShardedTensors.\n\n    Args:\n        state_dict (Dict[str, Any]) : The state_dict to load. Note that this\n            state dict will be updated in place.\n        storage_reader (StorageReader): StorageReader used to load data from.\n        process_group (ProcessGroup):\n            ProcessGroup to be used for cross-rank synchronization.\n        coordinator_rank (int):\n            Rank to use to coordinate the checkpoint.\n            rank0 is used by default.\n        no_dist (bool): If ``True``, distributed checkpoint will not load\n            in SPMD style. (Default: ``False``)\n        planner (LoadPlanner): planner used to coordinate the load;\n            ``DefaultLoadPlanner`` is used when ``None``. (Default: ``None``)\n\n    Returns:\n        None.\n\n    Examples:\n        >>> # xdoctest: +SKIP\n        >>> my_model = MyModule()\n        >>> optimizer = Adagrad(my_model.parameters())\n        >>> model_state_dict = my_model.state_dict()\n        >>> fs_storage_reader = torch.distributed.checkpoint.FileSystemReader(\"/checkpoint/1\")\n\n        >>> torch.distributed.checkpoint.load_state_dict(\n        >>>     state_dict=model_state_dict,\n        >>>     storage_reader=fs_storage_reader,\n        >>> )\n\n        >>> # module.load_state_dict() function might have customized steps\n        >>> # to flush the state_dict, must call it to\n        >>> # ensure correct behavior.\n        >>> my_model.load_state_dict(model_state_dict)\n\n    .. note::\n        load_state_dict uses collectives to coordinate reads across ranks.\n        For NCCL-based process groups, internal tensor representations of\n        objects must be moved to the GPU device before communication takes place.\n        In this case, the device used is given by ``torch.cuda.current_device()``\n        and it is the user's responsibility to ensure that this is set so that each\n        rank has an individual GPU, via ``torch.cuda.set_device()``.\n    \"\"\"\n\n    torch._C._log_api_usage_once(\"torch.distributed.checkpoint.load_state_dict\")\n\n    distW = _DistWrapper(process_group, not no_dist, coordinator_rank)\n    if planner is None:\n        planner = DefaultLoadPlanner()\n\n    def local_step():\n        assert planner is not None\n        metadata = storage_reader.read_metadata()\n        planner.set_up_planner(state_dict, metadata, distW.is_coordinator)\n        storage_reader.set_up_storage_reader(metadata, distW.is_coordinator)\n\n        local_plan = planner.create_local_plan()\n        local_plan = storage_reader.prepare_local_plan(local_plan)\n        return local_plan\n\n    def global_step(all_local_plans):\n        assert planner is not None\n        all_local_plans = planner.create_global_plan(all_local_plans)\n        all_local_plans = storage_reader.prepare_global_plan(all_local_plans)\n        return all_local_plans\n\n    central_plan = distW.reduce_scatter(\"plan\", local_step, global_step)\n\n    def read_data():\n        assert planner is not None\n        final_local_plan = planner.finish_plan(central_plan)\n        all_reads = storage_reader.read_data(final_local_plan, planner)\n\n        all_reads.wait()\n        return None\n\n    _ = distW.all_gather(\"read\", read_data)\n","repo_name":"pytorch/pytorch","sub_path":"torch/distributed/checkpoint/state_dict_loader.py","file_name":"state_dict_loader.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"}
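The docstring's local-inference note distilled into a runnable sketch. The model is hypothetical and the checkpoint path is reused from the docstring example (whether a checkpoint actually exists there is up to the reader); `no_dist=True` avoids needing an initialized process group:

    import torch.nn as nn
    import torch.distributed.checkpoint as dcp  # module path taken from the record's sub_path

    model = nn.Linear(4, 2)          # hypothetical model
    state_dict = model.state_dict()  # tensors already allocated, as the docstring requires

    dcp.load_state_dict(
        state_dict=state_dict,
        storage_reader=dcp.FileSystemReader("/checkpoint/1"),
        no_dist=True,                # single-process load, no process group needed
    )
    model.load_state_dict(state_dict)  # flush into the module, per the docstring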
+{"seq_id":"16653929100","text":"import random\nfrom functools import wraps\nfrom string import ascii_letters\n\n\ndef lower_string(func):\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        result = func(*args, **kwargs)\n        return result.lower()\n    return wrapper\n\ndef shorten_string(func):\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        result = func(*args, **kwargs)\n        return result[:40]\n    return wrapper\n\ndef title_string(func):\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        result = func(*args, **kwargs)\n        return result.title()\n    return wrapper\n\n\n\n@title_string\n@lower_string\n@shorten_string\ndef random_string():\n    random_string = \"\"\n    characters = ascii_letters + \" . , ! ?\"\n    for _ in range(random.randint(7,70)):\n        random_string += random.choice(characters)\n    return random_string\n\n\n\nif __name__ == \"__main__\":\n    print(\"-\".ljust(40, \"-\"))\n    for _ in range(7):\n        print(random_string())\n    print(\"-\".ljust(40, \"-\"))","repo_name":"solomoniosif/SDA_Python_Exercises","sub_path":"13_2_21_python_intermediate/exercise_01.py","file_name":"exercise_01.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
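Stacked decorators apply bottom-up, so in the record above the result is shortened first, then lowercased, then title-cased. A quick check of that ordering with trivial stand-in decorators:

    from functools import wraps

    def tag(label):
        # Factory for decorators that record the order in which they run.
        def deco(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                return func(*args, **kwargs) + [label]
            return wrapper
        return deco

    @tag('outer')   # applied last, like title_string above
    @tag('inner')   # applied first, like shorten_string above
    def trace():
        return []

    print(trace())  # ['inner', 'outer']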
+{"seq_id":"29142722162","text":"# main.py\nimport discord\nfrom discord.ext import commands\nimport os\n\ndef main2():\n    prefix = '!'\n    intents = discord.Intents.all()\n\n    client = commands.Bot(command_prefix=prefix, intents = intents)\n\n    for filename in os.listdir('file path'):\n        if '.py' in filename:\n            filename = filename.replace('.py', '')\n            client.load_extension(f\"command.{filename}\")\n\n    with open('token file path', 'r') as f:\n        token1 = f.read()\n\n    client.run(token1) # If an error occurs here, go to the Discord bot settings and check -- important!!\n\nif __name__ == '__main__':\n    main2()\n","repo_name":"Magin-a/Codeuniv","sub_path":"백준고양이 봇/DAY1.py","file_name":"DAY1.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"51082229","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom search.martylib.protobuf_utils.patch import patch_enums\nfrom search.martylib.test_utils import TestCase\n\n\nclass TestPatch(TestCase):\n    def test_patch_enums(self):\n        from search.martylib.proto.structures import test_pb2\n\n        patch_enums()\n\n        self.assertEqual(\n            getattr(test_pb2.TopLevelEnum, 'NULL'),\n            0,\n        )\n        self.assertEqual(\n            getattr(test_pb2.Alpha.AlphaNestedEnum, 'N_NULL'),\n            0,\n        )\n\n        # Make sure values from different enums aren't mixed.\n        self.assertFalse(\n            hasattr(test_pb2.TopLevelEnum, 'N_NULL')\n        )\n        self.assertFalse(\n            hasattr(test_pb2.Alpha.AlphaNestedEnum, 'NULL')\n        )\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"Search engine/test_protobuf_utils/test_patch.py","file_name":"test_patch.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"33006412289","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nclass DateSelection():\n    def selected_date(self):\n        baseurl='https://www.expedia.com'\n        driver=webdriver.Chrome()\n        driver.maximize_window()\n        driver.implicitly_wait(10)\n        driver.get(baseurl)\n        clickCale=driver.find_element(By.ID,\"d1-btn\")\n        clickCale.click()\n        # selecDate=driver.find_element(By.XPATH,\"//button[contains(@data-day,'22') and contains(@aria-label,'Jul')][1]\")\n        # selecDate.click()\n        self.DateS=driver.find_element(By.XPATH,\"//button[contains(@data-stid,'apply-date-picker')and contains(text(),'Done')]\")\n        self.DateS.click()\n        time.sleep(3)\n        driver.quit()\n\n\nob=DateSelection()\nob.selected_date()","repo_name":"diyarammb/scrape_date_of_calender_using_python","sub_path":"Calender_selection.py","file_name":"Calender_selection.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5067593288","text":"import sys\nimport itertools\nimport subprocess\nimport numpy as np\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom shutil import rmtree\nfrom os import environ, mkdir, path\nimport tabulate_results\n\n\nget_results_only = False\n\nswitch_gpus = False #For multiple GPUs\nn_parallel_threads = 16\n\n# Set Hyper-parameters\nargs = dict()\n# The names should be the same as argument names in parser.py\nargs['hyper_params'] = ['dataset','lr', 'l2','drop_in', 'drop_out', 'wce']\ncustom = '_5_'\nnow = datetime.now()\nargs['timestamp'] = str(now.month)+'|'+str(now.day)+'|'+str(now.hour)+':'+str(now.minute)+':'+str(now.second) + custom # '05|12|03:41:02' # Month | Day | hours | minutes (24 hour clock)\n\nargs['dataset'] = ['facebook', 'amazon']\nargs['lr'] = [1e-2]#, 1e-5]\nargs['l2'] = [1e-2, 1e-4]#, 1e-5]\nargs['drop_in'] = [0.25]#, 0.5]\nargs['drop_out'] = [0.5]\nargs['wce'] = [1]\n\npos = args['hyper_params'].index('dataset')\nargs['hyper_params'][0], args['hyper_params'][pos] = args['hyper_params'][pos], args['hyper_params'][0]\n\n\nif not get_results_only:\n    def diff(t_a, t_b):\n        t_diff = relativedelta(t_a, t_b)\n        return '{h}h {m}m {s}s'.format(h=t_diff.hours, m=t_diff.minutes, s=t_diff.seconds)\n\n    # Create Args Directory to save arguments\n    args_path = 'args'\n    if not path.exists(args_path):\n        mkdir(args_path)\n    np.save(path.join('args', args['timestamp']), args)\n\n    #Create Log Directory for stdout Dumps\n    stdout_dump_path = 'stdout_dumps'\n    if not path.exists(stdout_dump_path):\n        mkdir(stdout_dump_path)\n\n    param_values = []\n    this_module = sys.modules[__name__]\n    for hp_name in args['hyper_params']:\n        param_values.append(args[hp_name])\n    combinations = list(itertools.product(*param_values))\n    n_combinations = len(combinations)\n    print('Total no of experiments: ', n_combinations)\n\n    pids = [None] * n_combinations\n    f = [None] * n_combinations\n    last_process = False\n    for i, setting in enumerate(combinations):\n        #Create command\n        command = \"python __main__.py \"\n        folder_suffix = args['timestamp']\n        for name, value in zip(args['hyper_params'], setting):\n            command += \"--\" + name + \" \" + str(value) + \" \"\n            if name != 'dataset':\n                folder_suffix += \"_\"+str(value)\n        command += \"--\" + \"folder_suffix \" + folder_suffix\n        print(i+1, '/', n_combinations, command)\n\n        if switch_gpus and (i % 2) == 0:\n            env = dict(environ, **{\"CUDA_DEVICE_ORDER\": \"PCI_BUS_ID\", \"CUDA_VISIBLE_DEVICES\": \"1\"})\n        else:\n            env = dict(environ, **{\"CUDA_DEVICE_ORDER\": \"PCI_BUS_ID\", \"CUDA_VISIBLE_DEVICES\": \"0\"})\n\n        name = path.join(stdout_dump_path, folder_suffix)\n        with open(name, 'w') as f[i]:\n            pids[i] = subprocess.Popen(command.split(), env=env, stdout=f[i])\n        if i == n_combinations-1:\n            last_process = True\n        if ((i+1) % n_parallel_threads == 0 and i >= n_parallel_threads-1) or last_process:\n            if last_process and not ((i+1) % n_parallel_threads) == 0:\n                n_parallel_threads = (i+1) % n_parallel_threads\n            start = datetime.now()\n            print('########## Waiting #############')\n            for t in range(n_parallel_threads-1, -1, -1):\n                pids[i-t].wait()\n            end = datetime.now()\n            print('########## Waiting Over######### Took', diff(end, start), 'for', n_parallel_threads, 'threads')\n\n    # Tabulate results in xls\n    tabulate_results.write_results(args)\n\nelse:\n    tabulate_results.write_results(args)\n    print(\"Done tabulation\")\n\n","repo_name":"PriyeshV/DCI","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
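The experiment grid in that runner is just the Cartesian product of the per-parameter value lists. A stripped-down sketch of the same expansion (values copied from the record, command name reused from it):

    import itertools

    args = {'dataset': ['facebook', 'amazon'], 'lr': [1e-2], 'l2': [1e-2, 1e-4]}

    # One tuple per experiment: dataset x lr x l2 -> 2 * 1 * 2 = 4 settings.
    for setting in itertools.product(*args.values()):
        flags = ' '.join(f'--{k} {v}' for k, v in zip(args.keys(), setting))
        print('python __main__.py ' + flags)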
+{"seq_id":"23658409424","text":"#!/usr/bin/env python\n# Python Network Programming Cookbook, Second Edition -- Chapter - 7\n# This program is optimized for Python 2.7.12 and Python 3.5.2.\n# It may run on any other version with/without modifications.\n\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route('/')\ndef index(num=1):\n    return \"Your Python Web Service<br/>Fibonacci(\"+ str(num) + \"): \"+ str(fibonacci(num))+ \"<br/>Square(\"+ str(num) + \"): \"+ str(square(num))\n\ndef fibonacci(n):\n    if n == 0:\n        return 0\n    elif n == 1:\n        return 1\n    else:\n        return fibonacci(n-1) + fibonacci(n-2)\n\n\ndef square(n):\n    print (\"Calculating for the number %s\" %n)\n    return n*n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"PacktPublishing/Python-Network-Programming-Cookbook-Second-Edition","sub_path":"Chapter07/7_7_create_restful_webservice.py","file_name":"7_7_create_restful_webservice.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"72"}
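The recursive fibonacci in that recipe recomputes every subproblem and turns exponential for larger num. A memoized variant using only the standard library (a sketch, not part of the original recipe):

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def fibonacci(n):
        # Same recurrence as the record, but each n is computed only once.
        if n < 2:
            return n
        return fibonacci(n - 1) + fibonacci(n - 2)

    print(fibonacci(100))  # returns instantly instead of effectively never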
+{"seq_id":"3601177449","text":"import os\r\nrootdir = r'C:/Users/xiaoji/Desktop/result/result1'\r\n\r\nfor a,b,filenames in os.walk(rootdir):\r\n    tk = r'domain-go.jp'\r\n    # Build a filtered list: removing items while iterating skips elements.\r\n    filenames = [filename for filename in filenames if filename.find(tk) != -1]\r\n\r\nfor filename in filenames:\r\n    fname = rootdir+r'/'+filename\r\n    f = open(fname,'r+')\r\n    data = f.read()\r\n    f.close()\r\n    data = data.replace(r'/warc/',r'/wet/')\r\n    data = data.replace(r'.warc.gz','.warc.wet.gz')\r\n    data = data.replace(r'crawl-data' , 'https://commoncrawl.s3.amazonaws.com/crawl-data')\r\n    f = open(fname,'w+')\r\n    f.write(data)\r\n    f.close()\r\n","repo_name":"lli130/Crawl-web-pages-from-CommonCrawl","sub_path":"warc3.py","file_name":"warc3.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"7355223182","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom compareIslands import *\n\ndef compressColumns(df):\n\n    #establish shortest column and grab its name\n    shortestCol = 100000\n    shortestColName = \"\"\n    for col in df.columns:\n        if df[col].count() < shortestCol:\n            shortestCol = df[col].count()\n            shortestColName = col\n\n\n    #remove values and average their neighbors to preserve movement\n    for col in df.columns:\n        diff = df[col].count()-shortestCol\n        if diff != 0:\n\n            #generate a list to get the incrementing values\n            nthF = df[col].count()/diff\n            nth = int(nthF)\n            colSeries = df[col]\n            nthCounter = 0\n            nthList = []\n\n            #calculate averages and delete rows\n            for i in range(len(colSeries)):\n\n                #count according to the incrementor, round, and add the result to the list;\n                #if i is in the list, then perform our functions on it\n                nthCounter += nthF\n                nthList.append(round(nthCounter))\n                if i in nthList and i < len(df[shortestColName]):\n                    if i != 0 and i != len(df[shortestColName])-1:\n                        avgLower = (colSeries[i]+colSeries[i-1])/2\n                        avgUpper = (colSeries[i]+colSeries[i+1])/2\n                        colSeries.loc[i-1] = avgLower\n                        colSeries.loc[i+1] = avgUpper\n                        colSeries = colSeries.drop(index=i)\n                    elif i == 0:\n                        avg = (colSeries[i]+colSeries[i+1])/2\n                        colSeries.loc[i+1] = avg\n                        colSeries = colSeries.drop(index=i)\n                    else:\n                        avg = (colSeries[i]+colSeries[i-1])/2\n                        colSeries.loc[i-1] = avg\n                        colSeries = colSeries.drop(index=i)\n\n            #reindex the column, as if we removed the values and compacted it to match the shortest column\n            oldIndex = list(colSeries.index)\n            newIndex = list(df[shortestColName].index)\n            indexDict = dict(zip(oldIndex,newIndex))\n\n            print(colSeries.count())\n            colSeries = colSeries.rename(index=indexDict)\n            df[col] = colSeries\n    return df\n\nislandDfs[2].to_csv(\"/Users/abram/Documents/PCC/perceptionAnalyzer/islandsDfs[0].csv\")\ncompressColumns(islandDfs[2]).to_csv(\"/Users/abram/Documents/PCC/perceptionAnalyzer/islandsDfs[0]Compressed.csv\")\n\n#plt.show()","repo_name":"abrhim/pcc-perceptionAnalaysis","sub_path":"compression.py","file_name":"compression.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"9464296071","text":"#https://www.acmicpc.net/problem/17210\n\ndoors = int(input())\nnum = int(input()) # 0: push 1: pull\n\nif doors > 5:\n    print(\"Love is open door\")\n\nelse:\n    for i in range(1, doors):\n        print((num+i)%2)\n","repo_name":"yewonleee/AlgorithmProblemSolving","sub_path":"python/week1/3-4_이예원_20210709.py","file_name":"3-4_이예원_20210709.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14092780096","text":"import pyttsx3\nimport speech_recognition as sr\nimport datetime\nimport wikipedia\nimport webbrowser\n\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[0].id)\n\ndef speak(audio):\n    engine.say(audio)\n    engine.runAndWait()\n\ndef wish():\n    hour = datetime.datetime.now().hour\n    if hour in range(0,12):\n        speak(\"Good Morning\")\n    elif hour in range(12,18):\n        speak(\"Good Afternoon\")\n    else:\n        speak(\"Good Evening\")\n    speak(\"I am rubic.\")\n\ndef takeCommand():\n    r = sr.Recognizer()\n    with sr.Microphone() as source:\n        print(\"Listening......\")\n        r.pause_threshold = 0.5\n        audio = r.listen(source)\n    try:\n        print(\"Recognizing.........\")\n        query = r.recognize_google(audio,language='en-in')\n        print(f\"User said :{query}\\n\")\n\n    except Exception as e:\n        print(\"say that again please.....\")\n        return \"None\"\n    return query\n\nif __name__ == \"__main__\":\n    wish()\n\n    while True:\n        query = takeCommand().lower()\n\n        # wikipedia logic\n        if 'wikipedia' in query:\n            speak(\"searching wikipedia....\")\n            query = query.replace(\"wikipedia\",\"\")\n            results = wikipedia.summary(query,sentences=2)\n            print(results)\n            speak(results)\n\n        # for opening website in browser\n        elif \"open youtube\" in query:\n            webbrowser.open(\"youtube.com\")\n        elif \"open google\" in query:\n            webbrowser.open(\"google.com\")","repo_name":"Rohit-Gupta11/Basic-Virtual-Asistent","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11628116105","text":"# -*- coding: utf-8 -*-\n# (c) 2020 Praxya - Aitor Rosell Torralba\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom odoo import models, fields, api, http\nfrom pyfcm import FCMNotification\n\n\nclass ResCompany(models.Model):\n    _inherit = \"res.company\"\n\n    push_service_object = False\n\n    @api.multi\n    def push_service(self):\n        self.ensure_one()\n        if not self.push_service_object:\n            self.push_service_object = FCMNotification(api_key=\"ANONYM\")\n        return self.push_service_object\n\n    app_api_key = fields.Char(default=\"ANONYM\", string=\"App API KEY\")\n    app_private_key = fields.Char(default=\"ANONYM\", string=\"App PRIVATE KEY\")\n    app_url = fields.Char(default=\"ANONYM\", string=\"App URL\")\n","repo_name":"takashi1kun/aitor-odoo-stuff","sub_path":"anonbussiness_app_api/models/res_company.py","file_name":"res_company.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"614621266","text":"import requests, os, asyncio, aiofiles, time, imageio\nfrom aiohttp import ClientConnectionError, ClientSession\n\nstart = time.time()\n\nif os.path.exists('rilla_assets.txt'):\n    with open('rilla_assets.txt') as f:\n        assets = set(map(int, f.read().split(', ')))\nelse:\n    creators = ['PO4CEJB6IV2P5UACZ3P77KJCITMX2ZIT6RMW4WTX6JQGJYNJS6T5E4V27Q', 'MPRRGD2IXHYNHRMOFD5AE6Y2KK6DL32GKDFIZG7SC6TYO6AKK7CZSSBKTA','2QDW33WUCFKDNEZEZPBF7MCJUOFWOTOPAL64NHHVXUXE5B6L5VKQMPYZXA']\n    assets = set()\n\n    for creator in creators:\n        url = f'https://algoindexer.algoexplorerapi.io/v2/assets?creator={creator}'\n        data = requests.get(url).json()\n        while 'next-token' in data:\n            for asset in data['assets']:\n                assets.add(asset['index'])\n            data = requests.get(url+f'&next={data[\"next-token\"]}').json()\n\n    with open('rilla_assets.txt', 'w') as outfile:\n        data = str(assets)[1:-1]\n        outfile.write(data)\n\n\nwallet = 'GCDW4TJFIDZZJME4NYUWSQWYGEUDSNSMMEQ4JYH7PM7CRQAUXTJFDDOC2A'\nwallet_url = f'https://algoindexer.algoexplorerapi.io/v2/accounts/{wallet}'\nwallet_data = requests.get(wallet_url).json()\nrillas = set()\nfor asset in wallet_data['account']['assets']:\n    if asset['asset-id'] in assets and asset['amount'] == 1:\n        rillas.add(asset['asset-id'])\n\nasync def fetch_rilla(wallet, rilla_id, session):\n    rand_url = f'https://www.randgallery.com/cdn-cgi/image/height=512,quality=80,format=auto,onerror=redirect/cache/images/{rilla_id}.png?v2'\n    file_name = f'{wallet}/{rilla_id}.jpeg'\n    try:\n        resp = await session.request(method='GET', url=rand_url)\n        async for data in resp.content.iter_chunked(1024):\n            async with aiofiles.open(file_name, \"ba\") as f:\n                await f.write(data)\n\n    except ClientConnectionError:\n        # 'url' was undefined here; report the URL that actually failed.\n        return (rand_url, 404)\n    return file_name\n\n\nasync def fetch_all_rillas(wallet, rillas):\n    if not os.path.exists(wallet):\n        os.mkdir(wallet)\n    async with ClientSession() as session:\n        tasks = []\n        for rilla in rillas:\n            tasks.append(\n                fetch_rilla(wallet, rilla, session)\n            )\n        results = await asyncio.gather(*tasks)\n\n    images = []\n    for file_name in results:\n        images.append(imageio.imread(file_name))\n    imageio.mimsave(f'{wallet}.gif', images, duration=.5)\n\nasyncio.run(fetch_all_rillas(wallet, rillas))\nprint(time.time() - start)\n","repo_name":"truunfederalagent/rilla-gifs","sub_path":"rilla_grabber.py","file_name":"rilla_grabber.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
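The unbounded asyncio.gather in that script fires every download at once. A common refinement, not in the record, is to cap in-flight requests with a semaphore; a sketch assuming an aiohttp session supplied by the caller:

    import asyncio

    async def fetch(url, session, sem):
        # At most 10 requests are in flight at any moment.
        async with sem:
            async with session.get(url) as resp:
                return await resp.read()

    async def fetch_all(urls, session):
        sem = asyncio.Semaphore(10)
        return await asyncio.gather(*(fetch(u, session, sem) for u in urls))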
+{"seq_id":"18140546927","text":"import random\n\ndef enqueueJob(jobs, job):\n    i = 0\n    while i < len(jobs) and jobs[i][0] < job[0]:\n        i += 1\n    # Insert at i itself: inserting at i - 1 would place the job before\n    # an earlier event and break the time ordering of the queue.\n    jobs.insert(i, job)\n\n\ndef EventSimulation(distribution, jobProcessor, jobsAmount=0, returnPercentage=0):\n    processedJobs = 0\n    currentQueueLen = 0\n    maxQueueLen = 0\n\n    jobs = [[distribution.GetRandomValue(), 'g']]\n\n    free, isProcessed = True, False\n\n    generatedJobs = 0\n    returnedJobs = 0\n\n    while processedJobs < jobsAmount + returnedJobs:\n\n        job = jobs.pop(0)\n\n        if job[1] == 'g' and generatedJobs <= jobsAmount:\n\n            currentQueueLen += 1\n            generatedJobs += 1\n\n            if currentQueueLen > maxQueueLen:\n                maxQueueLen = currentQueueLen\n\n            enqueueJob(jobs, [job[0] + distribution.GetRandomValue(), 'g'])\n\n            if free:\n                isProcessed = True\n\n        elif job[1] == 'p':\n\n            processedJobs += 1\n\n            if random.randint(1, 100) <= returnPercentage:\n                returnedJobs += 1\n                currentQueueLen += 1\n\n            isProcessed = True\n\n        if isProcessed:\n\n            if currentQueueLen > 0:\n                currentQueueLen -= 1\n                t = jobProcessor.GetRandomValue()\n                enqueueJob(jobs, [job[0] + t, 'p'])\n                free = False\n            else:\n                free = True\n            isProcessed = False\n\n    return maxQueueLen, processedJobs, returnedJobs\n\n\ndef TimekeepingSimulation(distribution, jobProcessor, jobsAmount=0, returnPercentage=0, step=0.001):\n    processedJobs = 0\n\n    currentTime = step\n    generationTime = distribution.GetRandomValue()\n    processTime = 0\n\n    currentQueueLen = maxQueueLen = 0\n    generatedJobs = 0\n    returnedJobs = 0\n\n    while processedJobs < jobsAmount + returnedJobs:\n\n        if currentTime > generationTime and generatedJobs <= jobsAmount:\n            currentQueueLen += 1\n            generatedJobs += 1\n            if currentQueueLen > maxQueueLen:\n                maxQueueLen = currentQueueLen\n            generationTime += distribution.GetRandomValue()\n\n        if currentTime > processTime:\n            if currentQueueLen > 0:\n                processedJobs += 1\n\n                if random.randint(1, 100) <= returnPercentage:\n                    returnedJobs += 1\n                    currentQueueLen += 1\n\n                currentQueueLen -= 1\n                processTime += jobProcessor.GetRandomValue()\n        currentTime += step\n\n    return maxQueueLen, processedJobs, returnedJobs","repo_name":"honeycarbs/bmstu-modeling-7sem","sub_path":"lab-04/code/queueing/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
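enqueueJob above is an insertion into a time-sorted event list; the standard library does the same scan in C. A sketch with bisect (the key argument requires Python 3.10+):

    import bisect

    jobs = [[0.5, 'g'], [1.2, 'p'], [3.0, 'g']]
    bisect.insort(jobs, [2.1, 'p'], key=lambda job: job[0])  # stays sorted by event time
    print(jobs)  # [[0.5, 'g'], [1.2, 'p'], [2.1, 'p'], [3.0, 'g']]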
+{"seq_id":"10252154031","text":"from turtle import *\n\nmau = ['red','blue','brown','yellow','grey']\n\nfor hinh in range (5):\n    color(mau[hinh])\n    begin_fill()\n    # draw a rectangle\n    for i in range (2):\n        for cn in range (1,3):\n            forward(50*cn)\n            right(90)\n    end_fill()\n    forward(50)\nmainloop()","repo_name":"quocthai200x/NguyenXuanQuocThai-Fundamentals-C4E27","sub_path":"session 3/HW3/hinh2.py","file_name":"hinh2.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71498072872","text":"import logging\n\n# logging.basicConfig(\n#     level= logging.DEBUG ,\n#     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' ,\n#     datefmt='%m/%d/%Y %H:%M:%S'\n# )\n\n# # types of loggings\n# logging.debug(\"This is a debug message\")\n# logging.info(\"This is a info message\")\n# logging.warning(\"This is a warning message\")\n# logging.error(\"This is a error message\")\n# logging.critical(\"This is a critical message\")\n\n# # define log handlers\n# logger = logging.getLogger(__name__)\n\n# stream_h = logging.StreamHandler()\n# file_h = logging.FileHandler('file.log')\n\n# # level & format for each handler\n# stream_h.setLevel(level=logging.WARNING)\n# file_h.setLevel(level=logging.ERROR)\n\n# formatter = logging.Formatter( '%(name)s - %(levelname)s - %(message)s' )\n# stream_h.setFormatter(formatter)\n# file_h.setFormatter(formatter)\n\n# # add handler to the logger\n# logger.addHandler(stream_h)\n# logger.addHandler(file_h)\n\n# # log\n# logger.warning(\"This is a warning\")\n# logger.error(\"This is a error\")\n\n#-------------------- loading from config ----------------------------\n\n# import logging.config\n\n# logging.config.fileConfig('logging.conf')\n\n# logger = logging.getLogger('simpleExample')\n# logger.debug(\"This is a debug message\")\n\n\n#------------------- error logging --------------------------------\n# import traceback\n\n# try :\n#     a = [1,2,3]\n#     val = a[4]\n# except :\n#     logging.error( \"The error is %s\" , traceback.format_exc() )\n\n\n# --------------------- rotating handlers (for big applications, keep latest logs) -------------------------\nfrom logging.handlers import RotatingFileHandler , TimedRotatingFileHandler\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\n# roll over after 2KB and keep backup logs app.log , app.log2\nhandler = RotatingFileHandler( 'app.log' , maxBytes=2000 , backupCount=5 )\n\n# roll over at a preset time: s,m,h,d,midnight\nhandler_time = TimedRotatingFileHandler( 'timed_test.log',\n                                         when='s',\n                                         interval=5 ,\n                                         backupCount=5 )\n\nlogger.addHandler(handler)\n\nfor _ in range(1000):\n    logger.info('hello, world!')\n","repo_name":"himasha0421/Advance-Python-World","sub_path":"logging_styles/logging_func.py","file_name":"logging_func.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"9543602741","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 31 16:13:15 2022\n\n@author: Elliot\n\nThis exercise uses Object Oriented Programming to create an Agent Based Model\n(a model showing sheep interacting with an environment).\n\nSteps\n\n1. Import libraries and files being worked with. (Line 29 - 38)\nThe agentframework and environ files are integral parts of this\nmodel which allow it to run. Importing them here allows Model to read in code from these files.\n\nThe matplotlib library allows us to visualise the model (Line 147 - 160)\n\n2. Create Agents and Wolf - See agentframework.py\n\n3. Function to find distance between agents.\nFor the function distance_between, the program goes through rows x and y and calculates\nthe distance using Pythagoras' theorem.\n\n4. Create Environment. - See environ.py\n\n5. Run Agent_Based_Model\n\"\"\"\n# Imports Libraries\nimport matplotlib\nmatplotlib.use('TkAgg') #TkAgg renders data to a tk Canvas\nimport matplotlib.backends.backend_tkagg # needed for FigureCanvasTkAgg below\nimport tkinter\nimport matplotlib.pyplot\nimport agentframework\nimport environ\nimport matplotlib.animation\nimport random\nimport requests\nimport bs4\n\n\n#Defining Variables\nnum_of_iterations = 500 # Number of times model runs\nnum_of_agents = 20 # Agents in model\nneighbourhood = 10\nagents = [] # Creates List of Agents.\nwolves = [] # Creates List of wolves.\n\n\n\"\"\"\nWeb Scraping\n\nData from the site below is drawn into this model.\nThis data is assigned to x and y values that determine the initial starting positions of agents.\n\"\"\"\n\nr = requests.get('http://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html')\ncontent = r.text\nsoup = bs4.BeautifulSoup(content, 'html.parser')\ntd_ys = soup.find_all(attrs={\"class\" : \"y\"})\ntd_xs = soup.find_all(attrs={\"class\" : \"x\"})\n\n\"\"\"Web scraping test\"\"\"\nprint(td_ys, td_xs)\n\n\n#Reads in environment\nenvironment = environ.readEnvironment()\n\n\n\"\"\"Environment Test\"\"\"\n#a = agentframework.Agent(environment)\n#print(a._y, a._x)\n\n\n#Define parameters of Canvas\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\n\n\n\"\"\"\nAgents are created and enabled to interact with their environment.\nThe loop goes through the number of agents, determines each agent's starting position and creates it by\ndrawing attributes from agentframework. It then appends it to the agents list.\n\"\"\"\nfor i in range (num_of_agents): #Creates a loop going through number of agents\n    y = int(td_ys[i].text)\n    x = int(td_xs[i].text)\n\n#Creates an agent instance on the line below while attaching the elements environment and agents.\n    sheep = agentframework.Agent(i,environment, agents, x, y)\n    agents.append(sheep)\n\n\"\"\"\nOutside the loop we create a wolf, which goes through the same motion of drawing\nits attributes from the framework.\n\"\"\"\n\nwolve = agentframework.Agent(num_of_agents, environment, agents, x, y)\nwolves.append(wolve)\n\n\ndef update(frame_number):\n\n    \"\"\"Once the canvas is drawn, it updates the canvas per frame_number.\n\n    If the argument frame_number isn't passed, the frame is not updated.\n\n    Parameters\n    ----------\n    frame_number : int,\n        Number of frames after which the canvas is cleared and redrawn\n    ------\n    \"\"\"\n    fig.clear()\n    global carry_on\n\n\n#Agentframework Tasks\n    random.shuffle(agents) # Agent order is randomized\n\n#Loops through list of agents and calls agent methods defined in agentframework. This gives agency to created agents\n    for i in range (num_of_agents):\n        #print (agents[i].i)\n\n        agent = agents[i]\n#This if statement is a condition which states: if agents are alive, show them on screen. If they die, take them off screen.\n        if agent.living:\n            agent.move()\n            agent.eat()\n            agent.shared_neigbourhood(neighbourhood)\n#Wolves are outside the if statement because they are alive and doing the killing\n    wolves[0].move()\n    wolves[0].eatsheep(neighbourhood)\n\n#Random stopping condition: each frame there is a 10% chance carry_on is set to False.\n    if random.random() < 0.1:\n        carry_on = False\n        #print(\"stopping condition\")\n    else:\n        carry_on = True\n        #print(\"Continuing\")\n\n\n#Displays Environment\n    matplotlib.pyplot.ylim(0,250)\n    matplotlib.pyplot.xlim(0,250)\n    matplotlib.pyplot.imshow(environment)\n    matplotlib.pyplot.xlabel('Sheep are white, Wolf is brown')\n    matplotlib.pyplot.title(label=\"Sheep in trouble, Wolf on the loose\",\n                            loc=\"center\",\n                            fontstyle='italic')\n\n\n#Displays agents on Environment\n    for i in range (num_of_agents):\n        if agents[i].living:\n            matplotlib.pyplot.scatter(agents[i]._y,agents[i]._x,c=\"white\") # y and x points (Sheep) plot on map, colour white\n    matplotlib.pyplot.scatter(wolves[0]._y,wolves[0]._x,c=\"brown\") # y and x points plot on map (wolf), colour brown\n\n\nmatplotlib.pyplot.show()\n\n\"\"\"\nCreates the Graphical User Interface. The run and quit functions are commands which allow the model to be run and stopped from the GUI.\ntkinter.Menu creates the menu bar, with model_menu.add_command creating buttons to execute the functions defined above.\n\"\"\"\n\ndef run():\n\n    \"\"\"This runs the simulation.\n\n    Parameters\n    ----------\n    Does not take any parameters.\n    ------\n    \"\"\"\n    animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=50)\n    canvas.draw()\n\ndef quit():\n\n    \"\"\"This stops the simulation.\n\n    Parameters\n    ----------\n    Does not take any parameters.\n    ------\n    \"\"\"\n    global root\n    root.quit()\n\n'''Creates GUI'''\n\nroot = tkinter.Tk()\nroot.wm_title(\"Model\")\ncanvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root,)\ncanvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n\nmenu_bar = tkinter.Menu(root)\nroot.config(menu=menu_bar)\nmodel_menu = tkinter.Menu(menu_bar)\nmenu_bar.add_cascade(label=\"Model\", menu=model_menu)\nmodel_menu.add_command(label=\"Run model\", command=run, state=\"normal\")\nmodel_menu.add_command(label=\"Clear model\", command=quit, state=\"normal\")\n\n\ntkinter.mainloop()\n","repo_name":"elliotkarikari/MSc-Submission-ABM","sub_path":"GIS Workbook/Model_Based_Agents.py","file_name":"Model_Based_Agents.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3301767175","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nfrom gym_chess_env import ChessBoard_gym\nfrom agent_chess_pytorch import DQN\nimport numpy as np\nimport math\nimport chess\n\n\n\n\n# In[2]:\nclass Gen_Legal_move:\n    def __init__(self, model_weights=\"checkpoint.pth-4rook_best-adamw.tar\"):\n        super(Gen_Legal_move, self).__init__()\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        self.model = DQN(8,8,112).to(self.device)\n        self.model = self.load_from_saved_model(self.model, model_weights)\n\n    def load_from_saved_model(self, model, path = \"checkpoint.pth.tar\"):\n        # Restore trained weights from a checkpoint file.\n        checkpoint = torch.load(path)\n        model.load_state_dict(checkpoint['state_dict'])\n        return model\n\n    def generate_legal_moves(self, board, num_moves):\n        # Build the environment before reading its state (the original read\n        # env.reset() before env existed); assumes reset() returns the\n        # observation for the board configured via set_board().\n        env = ChessBoard_gym()\n        env.set_board(board)\n        state = torch.from_numpy(env.reset()).float()\n        starting_pos_FEN = env.get_FEN()\n\n        observation_space = 64\n        state_model_input = torch.reshape(state, [1, observation_space])\n\n        action_id = self.model(state_model_input).argmax(1)[0].detach()\n        legal_move_ids = []\n        for i in range(0,num_moves):\n            next_state,reward, _, _ = env.step(action_id)\n            next_state_model_input = torch.from_numpy(next_state).float()\n            next_state_model_input = torch.reshape(next_state_model_input, [1, observation_space])\n            # Query the model again; the original referenced an undefined 'actions_list'.\n            action_id = self.model(next_state_model_input).argmax(1)[0].detach()\n            legal_move_ids.append(action_id)\n\n        return legal_move_ids\n","repo_name":"pnarsina/w251_chess_objectid_n_rl","sub_path":"rl_model_generate_legalMoves.py","file_name":"rl_model_generate_legalMoves.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"35331433090","text":"from PygameManager import PygameManager\nfrom QueueStepper import QueueStepper\nimport pygame\n\npygameManager = PygameManager(pygame)\npygameManager.setup()\n\nqueueStepper = QueueStepper(\"Chapters.json\", pygame)\n\nprogrammAktiv = True\n\nwhile (programmAktiv and not queueStepper.isEndReached()):\n    for event in pygame.event.get():\n        if event.type == pygame.KEYUP:\n            if event.key == pygame.K_q:\n                programmAktiv = False\n            if event.key == pygame.K_a:\n                queueStepper.nextStep()\n    pygameManager.clockTick()\npygameManager.teardown()","repo_name":"Sosian/VoiceControlBachelorThesis","sub_path":"VoiceControl.py","file_name":"VoiceControl.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5165439530","text":"import os\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nimport plotly.express as px\n\nfrom inspect import getsourcefile\nfrom os.path import abspath\n\nfrom transit_score import transit_score\n\n#set active directory to file location\ndirectory = abspath(getsourcefile(lambda:0))\n#check if system uses forward or backslashes for writing directories\nif(directory.rfind(\"/\") != -1):\n    newDirectory = directory[:(directory.rfind(\"/\")+1)]\nelse:\n    newDirectory = directory[:(directory.rfind(\"\\\\\")+1)]\nos.chdir(newDirectory)\n\ndef create_property_scores():\n    #list of geodataframes - each one is a different amenity\n    amenities = []\n\n    #import amenities: bus stops, grocery stores, hospitals, etc.\n    #list of files in 'amenity data'\n    amenity_files = os.listdir('amenity data')\n    for file in amenity_files:\n        df = pd.read_csv('amenity data/'+file)\n        #convert to gdf using Latitude/Longitude\n        gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.Longitude, df.Latitude))\n        gdf['category'] = file[:-4]\n        #wgs84 is the standard lat/long coordinate system\n        gdf.crs = 'epsg:4326'\n        #convert to NAD UTM 10N\n        gdf = gdf.to_crs('epsg:26910')\n        amenities.append(gdf)\n\n    properties = gpd.read_file(\"CRD Properties/core muni properties dissolved.geojson\")\n    #drop all columns except geometry and AddressCombined\n    properties = properties[['geometry', 'AddressCombined']]\n\n    properties = properties.to_crs('epsg:26910')\n\n    #check for invalid geometries\n    properties = properties[properties.is_valid]\n\n    #calculate transit score\n    print(\"Calculating transit score...\")\n    properties = transit_score(properties)\n\n    #reset index\n    properties = properties.reset_index()\n\n    for amenity in amenities:\n        category = amenity['category'][0]\n        properties[category] = 0\n\n        buffer = gpd.GeoDataFrame(geometry=amenity.buffer(800,resolution=1))\n\n        # Perform a spatial join operation between the two datasets\n        properties_within_buffer = gpd.sjoin(properties, buffer, predicate='intersects')\n\n        # Create a new column called category and assign a value of 1 to all rows\n        properties_within_buffer[category] = 1\n\n        # Update the 'amenity' column in the original properties dataset for the properties within the buffer\n        properties.loc[properties_within_buffer.index, category] = 1\n\n        print(\"Analyzed {}. {} amenities in dataset, {} properties within 800m buffer.\".format(category, len(amenity), len(properties_within_buffer)))\n\n    properties = properties.to_crs('epsg:4326')\n\n    #MERGE WITH HFL ZONING DATA\n\n    #import zoning data\n    zoning = gpd.read_file('zoning/Harmonized_Zones.shp')\n    zoning = zoning[['SIMPLIFIED', 'geometry']]\n    zoning = zoning.to_crs('epsg:4326')\n    zoning = zoning.rename(columns={'SIMPLIFIED': 'Current Zoning'})\n\n    #perform spatial join. Find zoning for each property, create 'zone' column in properties with zoning.\n    #zoning has a 'SIMPLIFIED' column. This is the zoning type.\n    print(len(properties))\n\n    original_geometry = properties.geometry\n\n    #Zoning maps being aligned with the edge of properties causes multiple zones to be assigned to each property.\n    #Scaling properties down to 70% of their size fixes most of this. Doesn't always work.\n\n    properties.geometry = properties.geometry.scale(xfact=0.7, yfact=0.7, zfact=0.7, origin=\"centroid\")\n    properties = gpd.sjoin(properties, zoning, how='left', predicate='intersects')\n    properties.geometry = original_geometry\n\n    #if there are multiple rows with the same geometry/Address, go with the first one. There's a few edge cases where this happens and it's on my list of things to investigate.\n    properties = properties.drop_duplicates(subset=['geometry'], keep='first')\n    properties = properties.drop_duplicates(subset=['AddressCombined'], keep='first')\n\n    print(len(properties))\n    properties = properties.reset_index(drop=True)\n    properties = properties.drop(columns=['index_right'])\n\n    properties.to_file(\"CRD Properties/scored_properties.geojson\", driver='GeoJSON')\n\n    return\n\ndef aggregate_amenities(properties):\n\n    #import weights\n    weights = pd.read_csv('amenity weights.csv')\n\n    properties['amenity_score'] = 0\n\n    #for columns that aren't index, AddressCombined, transit_score, or geometry:\n    #multiply by weight\n    #add to amenity_score\n    properties = properties.to_crs('epsg:4326')\n\n    for col in properties.columns:\n        if(col not in ['index', 'AddressCombined', 'transit_score', 'geometry','amenity_score', 'Current Zoning']):\n            print(col)\n            w = weights[weights['amenity'] == col]['weight'].values[0]\n            properties[col] = properties[col].astype(int)\n            properties['amenity_score'] = properties['amenity_score'] + w*properties[col]\n\n    #normalize amenity score from 0 to 1\n    properties['amenity_score'] = properties['amenity_score']/properties['amenity_score'].max()\n\n    #transit_score is from 0 to 1. arbitrary weights\n    properties['OCP Score'] = 0.5*properties['transit_score'] + 0.5*properties['amenity_score']\n\n    properties = properties[['geometry', 'AddressCombined', 'amenity_score', 'transit_score', 'OCP Score', 'Current Zoning']]\n\n    #multiply by 100 and then round to an integer using round()\n    properties['amenity_score'] = round(100*properties['amenity_score'])\n    properties['transit_score'] = round(100*properties['transit_score'])\n    properties['OCP Score'] = round(100*properties['OCP Score'])\n\n    return(properties)\n\n#call this function before running mapping\n\n#create_property_scores()","repo_name":"homesforliving/OCP-Reform","sub_path":"OCP_score_generation.py","file_name":"OCP_score_generation.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
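The amenity flagging in that script reduces to: buffer each amenity 800 m, spatially join against the properties, mark the hits. Distilled into one helper (column name and function are illustrative; EPSG:26910 is the metric projection the record uses, so the buffer radius is in metres):

    import geopandas as gpd

    def flag_within(properties, amenities, radius_m=800, col='near_amenity'):
        # Both frames must share a metric CRS so the buffer is in metres.
        buffers = gpd.GeoDataFrame(geometry=amenities.buffer(radius_m), crs=amenities.crs)
        hits = gpd.sjoin(properties, buffers, predicate='intersects')
        properties[col] = 0
        properties.loc[hits.index, col] = 1  # sjoin keeps the left frame's index
        return properties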
+{"seq_id":"698167110","text":"import typing\r\nimport asyncio\r\nimport logging\r\nimport enum\r\nimport time\r\n\r\n# wsproto\r\nimport wsproto\r\nimport wsproto.events\r\n\r\nfrom hatsu.types import Scope, Transport, Protocol\r\nfrom hatsu.utils import get_addr, get_scheme\r\n\r\nif typing.TYPE_CHECKING:\r\n from hatsu.core.server import Server\r\n\r\nfrom urllib.parse import unquote\r\n\r\nclass WebsocketState(enum.Enum):\r\n HANDSHAKE = 0\r\n CONNECTED = 1\r\n CLOSED = 2\r\n\r\nclass WSProtoImpl(Protocol):\r\n \"\"\"A ws implementation using `asyncio.Protocol`.\"\"\"\r\n\r\n def __init__(self, server: \"Server\") -> None:\r\n self.server = server\r\n\r\n # Logging.\r\n self.logger = logging.getLogger('hatsu.protocols.websocket_wsproto')\r\n\r\n # Asyncio.\r\n self.loop = asyncio.get_event_loop()\r\n\r\n # Connection state.\r\n self.transport = None\r\n self.peername = None\r\n self.sockname = None\r\n self.connection = wsproto.WSConnection(\r\n wsproto.ConnectionType.SERVER\r\n )\r\n self.queue = asyncio.Queue()\r\n self.handshake_complete = False\r\n\r\n # Send and recv state.\r\n self.start_end = False\r\n self.body_end = False\r\n self.send_close = False\r\n self.state = WebsocketState.HANDSHAKE\r\n\r\n # Ping control.\r\n self.last_ping = time.time()\r\n\r\n # Flow control.\r\n self.write_pause = False\r\n self.read_pause = False\r\n\r\n # Buffer.\r\n self.text = \"\"\r\n self.bytes = b\"\"\r\n\r\n def connection_made(self, transport: Transport) -> None:\r\n # When a connection is initialized\r\n # it save the `transport` to the class\r\n # so it can be used later.\r\n\r\n self.server.connections.add(self)\r\n\r\n self.transport = transport\r\n self.schme = get_scheme(self.transport, type=\"websocket\")\r\n self.peername = get_addr(self.transport, type=\"peername\")\r\n self.sockname = get_addr(self.transport, type=\"sockname\")\r\n\r\n self.logger.debug('Connection made.')\r\n\r\n def connection_lost(self, exc: typing.Optional[Exception]) -> None:\r\n # When a connection is closed\r\n # it updates the `request` state\r\n # to `disconnected.`\r\n\r\n self.server.connections.discard(self)\r\n self.logger.debug(\"Connection closed.\")\r\n\r\n if self.read_pause is True:\r\n self.read_pause = False\r\n self.transport.resume_reading()\r\n\r\n self.transport.close()\r\n\r\n def data_received(self, data: bytes) -> None:\r\n # Called when the packet is complete.\r\n\r\n if len(data) > self.server.ws_max_size:\r\n raise ValueError(\"Data is too big.\")\r\n\r\n self.connection.receive_data(data)\r\n\r\n def __request__(event: wsproto.events.Request):\r\n # Called when the request is complete\r\n # it also build the scope.\r\n\r\n self.handshake_complete = True\r\n\r\n headers = [\r\n (b\"host\", event.host.encode(\"utf-8\")),\r\n *event.extra_headers\r\n ]\r\n raw_path, _, query_string = event.target.partition(\"?\")\r\n self.scope: \"Scope\" = {\r\n \"type\": \"websocket\",\r\n \"asgi\": {\r\n \"version\": \"3\",\r\n \"spec_version\": \"2.3\"\r\n },\r\n \"http_version\": \"1.1\",\r\n \"scheme\": self.schme,\r\n \"path\": unquote(raw_path),\r\n \"query_string\": query_string,\r\n \"root_path\": self.server.root_path,\r\n \"headers\": headers,\r\n \"client\": self.peername,\r\n \"server\": self.sockname,\r\n \"subprotocls\": event.subprotocols\r\n }\r\n message = {\r\n \"type\": \"websocket.connect\"\r\n }\r\n self.queue.put_nowait(message)\r\n\r\n if self.server.limit_concurrency is not None:\r\n if len(self.server.connections) >= self.server.limit_concurrency \\\r\n or 
len(self.server.tasks) >= self.server.limit_concurrency:\r\n self.logger.warning(\"Exceeded concurrency limit.\")\r\n\r\n self.app = self.server.application\r\n\r\n task = self.loop.create_task(\r\n self.app(\r\n self.scope, self.asgi_recv, self.asgi_send\r\n )\r\n )\r\n task.add_done_callback(self.server.tasks.discard)\r\n self.server.tasks.add(task)\r\n\r\n def __byte_message__(event: wsproto.events.BytesMessage):\r\n # Called when the client sends a message.\r\n self.bytes += event.data\r\n\r\n if event.message_finished is True:\r\n message = {\r\n \"type\": \"websocket.receive\",\r\n \"bytes\": self.bytes\r\n }\r\n self.queue.put_nowait(message)\r\n self.bytes = b\"\"\r\n\r\n def __text_message__(event: wsproto.events.TextMessage):\r\n # Called when the client sends a message.\r\n self.text += event.data\r\n\r\n if event.message_finished is True:\r\n message = {\r\n \"type\": \"websocket.receive\",\r\n \"text\": self.text\r\n }\r\n self.queue.put_nowait(message)\r\n self.text = \"\"\r\n\r\n def __close__(event: wsproto.events.CloseConnection):\r\n # Called when the connection is about to close.\r\n\r\n message = {\r\n \"type\": \"websocket.disconnect\",\r\n \"code\": event.code\r\n }\r\n self.queue.put_nowait(message)\r\n self.transport.close()\r\n\r\n def __ping__(event: wsproto.events.Ping):\r\n # Called when the server recv ping.\r\n\r\n if time.time() - (self.last_ping - self.server.ws_ping_interval):\r\n return\r\n\r\n self.transport.write(\r\n self.conn.send(event.response())\r\n )\r\n\r\n def __close__(event: wsproto.events.CloseConnection):\r\n # Called when the server recv close.\r\n\r\n message = {\r\n \"type\": \"websocket.disconnect\",\r\n \"code\": event.code\r\n }\r\n self.queue.put_nowait(message)\r\n self.transport.close()\r\n\r\n handlers = {\r\n wsproto.events.Request: __request__,\r\n wsproto.events.TextMessage: __text_message__,\r\n wsproto.events.BytesMessage: __byte_message__,\r\n wsproto.events.Ping: __ping__,\r\n wsproto.events.CloseConnection: __close__,\r\n }\r\n\r\n for event in self.connection.events():\r\n event_type = type(event)\r\n handlers[event_type](event)\r\n\r\n async def asgi_send(self, message):\r\n # Send interface for the application.\r\n\r\n if self.state is WebsocketState.HANDSHAKE:\r\n if message[\"type\"] != \"websocket.accept\":\r\n # TODO: MAKE IT BETTER.\r\n raise ValueError(\"Can't send this message in that state.\")\r\n\r\n subprotocls = message.get(\"subprotocls\")\r\n event = wsproto.events.AcceptConnection(\r\n subprotocol=subprotocls\r\n )\r\n self.transport.write(\r\n self.connection.send(event=event)\r\n )\r\n self.start_end = True\r\n self.state = WebsocketState.CONNECTED\r\n\r\n self.logger.debug(\r\n f'Websocket handshake complete. 
({self.peername})'\r\n )\r\n\r\n elif self.state is WebsocketState.CONNECTED:\r\n if message[\"type\"] not in (\"websocket.send\", \"websocket.close\"):\r\n # TODO: MAKE IT BETTER.\r\n raise ValueError(\"Can't send this message in that state.\")\r\n\r\n if message[\"type\"] == \"websocket.close\":\r\n code, reason = message.get(\"code\"), \\\r\n message.get(\"reason\", \"\")\r\n event = wsproto.events.CloseConnection(\r\n code=code, reason=reason\r\n )\r\n message = {\r\n \"type\": \"websocket.disconnect\",\r\n \"code\": code\r\n }\r\n\r\n self.queue.put_nowait(message)\r\n if self.transport.is_closing() is False:\r\n self.transport.write(\r\n self.connection.send(event=event)\r\n )\r\n self.transport.close()\r\n\r\n if self.transport.is_closing() is False:\r\n text = message.get('text', None)\r\n bytes = message.get('bytes', None)\r\n\r\n if text is not None:\r\n event = wsproto.events.TextMessage(\r\n data=text\r\n )\r\n elif bytes is not None:\r\n event = wsproto.events.BytesMessage(\r\n data=bytes\r\n )\r\n\r\n self.transport.write(\r\n self.connection.send(event=event)\r\n )\r\n\r\n async def asgi_recv(self):\r\n # Recv interface for the application.\r\n\r\n message = await self.queue.get()\r\n if self.read_pause is True and self.queue.empty():\r\n self.read_pause = False\r\n self.transport.resume_reading()\r\n return message\r\n\r\n def close_protocol(self):\r\n # To close the connection when clean up.\r\n\r\n if self.handshake_complete is True:\r\n message = {\"type\": \"websocket.disconnect\", \"code\": 1012}\r\n self.queue.put_nowait(message)\r\n\r\n event = wsproto.events.CloseConnection(code=1012)\r\n self.transport.write(\r\n self.connection.send(event=event)\r\n )\r\n else:\r\n headers = [\r\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\r\n (b\"connection\", b\"close\"),\r\n ]\r\n output = self.connection.send(\r\n wsproto.events.RejectConnection(\r\n status_code=500, headers=headers, has_body=True\r\n )\r\n )\r\n output += self.connection.send(\r\n wsproto.events.RejectData(data=b\"Internal Server Error\")\r\n )\r\n self.transport.write(output)\r\n self.transport.close()\r\n","repo_name":"ArtyTheDev/hatsu","sub_path":"hatsu/protocols/websocket_wsproto.py","file_name":"websocket_wsproto.py","file_ext":"py","file_size_in_byte":10294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
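Stripped of the ASGI plumbing, the wsproto flow in that protocol class is: feed raw bytes in, iterate the decoded events, send encoded responses back. A minimal synchronous sketch of that loop, using only the wsproto calls that appear in the record (socket handling elided):

    import wsproto
    import wsproto.events as ev

    conn = wsproto.WSConnection(wsproto.ConnectionType.SERVER)

    def handle(in_bytes: bytes) -> bytes:
        # Feed network bytes in; collect the bytes that should be written back.
        conn.receive_data(in_bytes)
        out = b""
        for event in conn.events():
            if isinstance(event, ev.Request):
                out += conn.send(ev.AcceptConnection())            # complete the handshake
            elif isinstance(event, ev.TextMessage):
                out += conn.send(ev.TextMessage(data=event.data))  # echo the message
            elif isinstance(event, ev.Ping):
                out += conn.send(event.response())                 # reply with a pong
            elif isinstance(event, ev.CloseConnection):
                out += conn.send(event.response())                 # acknowledge the close
        return out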
+{"seq_id":"9430313389","text":"\nfrom os import stat\nimport numpy as np\nimport pyro\nfrom pyro import poutine\nimport pyro.distributions as dist\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributions.constraints as constraints\nfrom tqdm import tqdm\nimport warnings\n\nfrom scipy.sparse import isspmatrix\nfrom kladi.matrix_models.scipm_base import BaseModel, get_fc_stack\nimport configparser\nimport requests\nimport json\nfrom itertools import zip_longest\nimport matplotlib.pyplot as plt\nimport logging\nfrom math import ceil\nfrom kladi.core.plot_utils import map_plot\nfrom functools import partial\nfrom kladi.matrix_models.scipm_base import Decoder\nfrom pyro.contrib.autoname import scope\n\nconfig = configparser.ConfigParser()\nconfig.read('kladi/matrix_models/config.ini')\n\nlogger = logging.getLogger(__name__)\n\ndef grouper(iterable, n, fillvalue=None):\n \"Collect data into fixed-length chunks or blocks\"\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\ndef compact_string(x, max_wordlen = 4, join_spacer = ' ', sep = ' '):\n return '\\n'.join(\n [\n join_spacer.join([x for x in segment if not x == '']) for segment in grouper(x.split(sep), max_wordlen, fillvalue='')\n ]\n )\n\nclass GeneDevianceModel:\n\n def __init__(self, highly_variable):\n self.highly_variable = highly_variable\n\n def fit(self, y_ij):\n \n y_ij = y_ij[:, self.highly_variable]\n self.pi_j_hat = y_ij.sum(axis = 0)/y_ij.sum()\n\n return self\n\n def set_pi(self, pi):\n self.pi_j_hat = pi\n\n def transform(self, y_ij):\n \n y_ij = y_ij[:, self.highly_variable]\n \n n_i = y_ij.sum(axis = 1, keepdims = True)\n\n mu_ij_hat = n_i * self.pi_j_hat[np.newaxis, :]\n\n count_dif = n_i - y_ij\n expected_count_dif = n_i - mu_ij_hat\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n r_ij = np.multiply(\n np.sign(y_ij - mu_ij_hat), \n np.sqrt(\n np.where(y_ij > 0, 2 * np.multiply(y_ij, np.log(y_ij / mu_ij_hat)), 0) + \\\n 2 * np.multiply(count_dif, np.log(count_dif / expected_count_dif))\n )\n )\n\n return np.clip(np.nan_to_num(r_ij), -10, 10)\n\n\nclass ExpressionEncoder(nn.Module):\n\n def __init__(self, num_genes, num_topics, hidden, dropout, num_layers):\n super().__init__()\n output_batchnorm_size = 2*num_topics + 2\n\n self.num_topics = num_topics\n self.fc_layers = get_fc_stack(\n layer_dims = [num_genes + 1, *[hidden]*(num_layers-1), output_batchnorm_size],\n dropout = dropout, skip_nonlin = True\n )\n \n def forward(self, X):\n\n X = self.fc_layers(X)\n\n theta_loc = X[:, :self.num_topics]\n theta_scale = F.softplus(X[:, self.num_topics:(2*self.num_topics)])# + 1e-5\n\n rd_loc = X[:,-2].reshape((-1,1))\n rd_scale = F.softplus(X[:,-1]).reshape((-1,1))# + 1e-5\n\n return theta_loc, theta_scale, rd_loc, rd_scale\n\n\nclass ExpressionModel(BaseModel):\n '''\n Class\n '''\n \n def __init__(self, genes, highly_variable = None, num_modules = 15, decoder_dropout = 0.2, \n encoder_dropout = 0.1, hidden = 128, use_cuda = True, num_layers = 3, seed = None):\n '''\n Initialize ExpressionModel instance. 
\n\n Example:\n\n >> genes[:3]\n ['GATA3', 'WNT3', 'CDK8']\n\n >> highly_variable[:3]\n np.array([True, False, False], dtype = bool)\n\n >> expr_model = ExpressionModel(genes, highly_variable = highly_variable, num_modules = 10)\n\n\n Args:\n genes (list, np.ndarray): Gene names / column names for count matrix, length must match dimension 2 of count matrix\n highly_variable (np.ndarray): boolean mask of same length as ``genes``. Genes flagged with ``True`` will be used as features for encoder. All genes will be used as features for decoder.\n This allows one to impute many genes while only learning modules on highly variable genes, decreasing model complexity and training time.\n num_modules (int): number of gene modules to find\n initial_counts (int): sparsity parameter, related to pseudocounts of dirichlet prior. Increasing will lead to denser cell latent variables, decreasing will lead to more sparse latent variables.\n dropout (float between 0,1): dropout rate for model.\n hidden (int): number of nodes in encoder hidden layers.\n use_cuda (bool): use CUDA to accelerate training on GPU (if GPU is available).\n\n Returns:\n ExpressionModel\n '''\n\n assert(isinstance(genes, (list, np.ndarray)))\n self.genes = np.ravel(np.array(genes))\n \n kwargs = dict(\n num_modules = num_modules,\n num_exog_features = len(self.genes),\n highly_variable = highly_variable,\n hidden = hidden,\n num_layers = num_layers,\n decoder_dropout = decoder_dropout,\n encoder_dropout = encoder_dropout,\n use_cuda = use_cuda,\n seed = seed,\n )\n \n super().__init__(ExpressionEncoder, Decoder, **kwargs)\n\n @scope(prefix= 'rna')\n def model(self, raw_expr, encoded_expr, read_depth, anneal_factor = 1.):\n\n pyro.module(\"decoder\", self.decoder)\n\n self.dispersion = pyro.param(\"dispersion\", torch.tensor(5.) 
* torch.ones(self.num_exog_features), \n constraint = constraints.positive).to(self.device)\n\n _alpha, _beta = self._get_gamma_parameters(self.I, self.num_topics)\n with pyro.plate(\"topics\", self.num_topics):\n initial_counts = pyro.sample(\"a\", dist.Gamma(self._to_tensor(_alpha), self._to_tensor(_beta)))\n\n theta_loc = self._get_prior_mu(initial_counts, self.K)\n theta_scale = self._get_prior_std(initial_counts, self.K)\n\n #print(theta_loc, theta_scale)\n \n with pyro.plate(\"cells\", encoded_expr.shape[0]):\n\n # Dirichlet prior 𝑝(𝜃|𝛼) is replaced by a log-normal distribution\n with poutine.scale(None, anneal_factor):\n theta = pyro.sample(\n \"theta\", dist.LogNormal(theta_loc, theta_scale).to_event(1))\n\n read_scale = pyro.sample('read_depth', dist.LogNormal(torch.log(read_depth), 1.).to_event(1))\n\n theta = theta/theta.sum(-1, keepdim = True)\n expr_rate = self.decoder(theta)\n\n mu = torch.multiply(read_scale, expr_rate)\n p = torch.minimum(mu / (mu + self.dispersion), self.max_prob)\n\n pyro.sample('obs', dist.NegativeBinomial(total_count = self.dispersion, probs = p).to_event(1), obs = raw_expr)\n\n @scope(prefix= 'rna')\n def guide(self, raw_expr, encoded_expr, read_depth, anneal_factor = 1.):\n\n pyro.module(\"encoder\", self.encoder)\n\n _counts_mu, _counts_var = self._get_lognormal_parameters_from_moments(*self._get_gamma_moments(self.I, self.num_topics))\n counts_mu = pyro.param('counts_mu', _counts_mu * encoded_expr.new_ones((self.num_topics,))).to(self.device)\n counts_std = pyro.param('counts_std', np.sqrt(_counts_var) * encoded_expr.new_ones((self.num_topics,)), \n constraint = constraints.positive).to(self.device)\n\n with pyro.plate(\"topics\", self.num_topics) as k:\n initial_counts = pyro.sample(\"a\", dist.LogNormal(counts_mu[k], counts_std[k]))\n\n \n with pyro.plate(\"cells\", encoded_expr.shape[0]):\n # Dirichlet prior 𝑝(𝜃|𝛼) is replaced by a log-normal distribution,\n # where μ and Σ are the encoder network outputs\n theta_loc, theta_scale, rd_loc, rd_scale = self.encoder(encoded_expr)\n\n with poutine.scale(None, anneal_factor):\n theta = pyro.sample(\n \"theta\", dist.LogNormal(theta_loc, theta_scale).to_event(1)\n )\n\n read_depth = pyro.sample(\n \"read_depth\", dist.LogNormal(rd_loc.reshape((-1,1)), rd_scale.reshape((-1,1))).to_event(1)\n )\n \n\n def _get_expression_distribution_parameters(self, raw_expr, batch_size = 32):\n \n def detach(x):\n return x.detach().cpu().numpy()\n\n X = self._validate_data(raw_expr)\n assert(isinstance(batch_size, int) and batch_size > 0)\n\n rd_locs, rd_scales, softmax_denoms = [], [], []\n for i,batch in enumerate(self._get_batches(X, batch_size = batch_size)):\n raw_expr, encoded_expr, read_depth = batch\n theta_loc, theta_scale, rd_loc, rd_scale = self.encoder(encoded_expr)\n\n rd_locs.append(detach(rd_loc))\n rd_scales.append(detach(rd_scale))\n\n theta = theta_loc.exp()/theta_loc.exp().sum(-1, keepdim = True)\n softmax_denoms.append(\n detach(self.decoder.get_softmax_denom(theta))\n )\n\n rd_loc = np.concatenate(rd_locs, 0)\n rd_scale = np.concatenate(rd_scales, 0)\n softmax_denom = np.concatenate(softmax_denoms, 0)\n return rd_loc, rd_scale, softmax_denom\n\n\n def _get_latent_MAP(self, raw_expr, encoded_expr, read_depth):\n theta_loc, theta_scale, rd_loc, rd_scale = self.encoder(encoded_expr)\n\n Z = theta_loc.cpu().detach().numpy()\n return np.exp(Z)/np.exp(Z).sum(-1, keepdims = True)\n\n\n def _get_batches(self, count_matrix, batch_size = 32, bar = False, training = True, desc = None):\n \n N = 
len(count_matrix)\n \n try:\n self.deviance_model\n except AttributeError:\n self.deviance_model = GeneDevianceModel(self.highly_variable).fit(count_matrix)\n\n for batch_start, batch_end in self._iterate_batch_idx(N, batch_size):\n yield self._featurize(count_matrix[batch_start : batch_end, :])\n\n def _validate_data(self, X):\n assert(isinstance(X, np.ndarray) or isspmatrix(X))\n \n if isspmatrix(X):\n X = np.array(X.todense())\n\n assert(len(X.shape) == 2)\n assert(X.shape[1] == self.num_exog_features)\n \n assert(np.isclose(X.astype(np.int64), X, 1e-1).all()), 'Input data must be raw transcript counts, represented as integers. Provided data contains non-integer values.'\n\n return X.astype(np.float32)\n\n def impute(self, latent_compositions):\n '''\n Compute imputed gene expression values using cells' latent variable representations.\n\n Args:\n latent_compositions (np.npdarray): Cells x num_modules array, each row must sum to 1\n\n Returns:\n (np.ndarray): imputed expression, Cells x num_genes matrix\n '''\n\n assert(isinstance(latent_compositions, np.ndarray))\n assert(len(latent_compositions.shape) == 2)\n assert(latent_compositions.shape[1] == self.num_topics)\n assert(np.isclose(latent_compositions.sum(-1), 1).all())\n\n latent_compositions = self._to_tensor(latent_compositions)\n\n return self.decoder(latent_compositions).cpu().detach().numpy()\n\n def _get_save_data(self):\n return dict(\n pi = self.deviance_model.pi_j_hat,\n **super()._get_save_data()\n )\n\n def _load_save_data(self, data):\n super()._load_save_data(data)\n\n self.deviance_model = GeneDevianceModel(self.highly_variable)\n self.deviance_model.set_pi(data['pi'])\n\n return self\n\n\n def _featurize(self, count_matrix):\n\n encoded_counts = self.deviance_model.transform(count_matrix)\n read_depth = count_matrix.sum(-1, keepdims = True)\n\n encoded_counts = np.hstack([encoded_counts, np.log(read_depth)])\n\n return self._to_tensor(count_matrix), self._to_tensor(encoded_counts), self._to_tensor(read_depth)\n\n def rank_genes(self, module_num):\n '''\n Ranks genes according to their activation in module ``module_num``. 
Sorted from most suppressed to most activated.\n\n Args:\n module_num (int): For which module to rank genes\n\n Returns:\n np.ndarray: sorted array of gene names in order from most suppressed to most activated given the specified module\n '''\n assert(isinstance(module_num, int) and module_num < self.num_topics and module_num >= 0)\n\n return self.genes[np.argsort(self._score_features()[module_num, :])]\n\n def get_top_genes(self, module_num, top_n = None):\n '''\n For a module, return the top n genes that are most activated.\n\n Args:\n module_num (int): For which module to return most activated genes\n top_n (int): number of genes to return\n\n Returns\n (np.ndarray): Names of top n genes, sorted from least to most activated\n '''\n\n if top_n is None:\n top_genes_mask = self._score_features()[module_num,:] > 2\n\n if top_genes_mask.sum() > 200:\n return self.genes[top_genes_mask]\n else:\n top_n = 200\n\n assert(isinstance(top_n, int) and top_n > 0)\n return self.rank_genes(module_num)[-top_n : ]\n\n\n def rank_modules(self, gene):\n '''\n For a gene, rank how much its expression is activated by each module\n\n Args:\n gene (str): name of gene\n \n Raises:\n AssertionError: if ``gene`` is not in self.genes\n \n Returns:\n (list): of format [(module_num, activation), ...]\n '''\n \n assert(gene in self.genes)\n\n gene_idx = np.argwhere(self.genes == gene)[0]\n return list(sorted(zip(range(self.num_topics), self._score_features()[:, gene_idx]), key = lambda x : x[1]))\n \n\n def post_genelist(self, module_num, top_n = None):\n '''\n Post genelist to Enrichr, recieve genelist ID for later retreival.\n\n Args:\n module_num (int): which module's top genes to post\n top_n_genes (int): number of genes to post\n\n Returns:\n enrichr_id (str): unique ID of genelist for retreival with ``get_enrichments`` or ``get_ontology``\n '''\n\n top_genes = '\\n'.join(self.get_top_genes(module_num, top_n=top_n))\n\n enrichr_url = config.get('Enrichr','url')\n post_endpoint = config.get('Enrichr','post')\n\n payload = {\n 'list': (None, top_genes),\n }\n\n logger.info('Querying Enrichr with module {} genes.'.format(str(module_num)))\n response = requests.post(enrichr_url + post_endpoint, files=payload)\n if not response.ok:\n raise Exception('Error analyzing gene list')\n\n list_id = json.loads(response.text)['userListId']\n return list_id\n\n def get_ontology(self, list_id, ontology = 'WikiPathways_2019_Human'):\n '''\n Fetches the gene-set enrichments for a genelist in a certain ontology from Enrichr\n\n Args:\n list_id (str): unique ID of genelist from ``post_genelist``\n ontology (str, default = Wikipathways_2019_Human): For which ontology to download results\n\n Returns:\n (dict): enrichments, with format:\n\n {\n ontology: {\n rank : [...],\n term : [...],\n pvalue : [...],\n zscore : [...],\n combined_score : [...],\n genes : [...],\n adj_pvalue : [...]\n }\n\n }\n '''\n\n enrichr_url = config.get('Enrichr','url')\n get_endpoint = config.get('Enrichr','get').format(list_id = list_id, ontology = ontology)\n\n response = requests.get(enrichr_url + get_endpoint)\n if not response.ok:\n raise Exception('Error fetching enrichment results')\n \n data = json.loads(response.text)[ontology]\n\n headers = config.get('Enrichr','results_headers').split(',')\n \n return {ontology : [dict(zip(headers, x)) for x in data]}\n\n\n def get_enrichments(self, list_id, ontologies = config.get('Enrichr','ontologies').split(',')):\n '''\n Fetches the gene-set enrichments for a genelist from ontologies listed\n\n Args:\n 
list_id (str): unique ID of genelist from ``post_genelist``\n ontologies (list, default in kladi/matrix_models/config.ini): or which ontologies to download results\n\n Returns:\n (dict): enrichments, with format:\n\n {\n ontology: {\n rank : [...],\n term : [...],\n pvalue : [...],\n zscore : [...],\n combined_score : [...],\n genes : [...],\n adj_pvalue : [...]\n }\n ...\n }\n '''\n\n logger.info('Downloading results ...')\n\n enrichments = dict()\n for ontology in ontologies:\n enrichments.update(self.get_ontology(list_id, ontology=ontology))\n\n return enrichments\n\n @staticmethod\n def _enrichment_plot(ax, ontology, results,*,\n text_color, show_top, barcolor, show_genes, max_genes):\n\n terms, genes, pvals = [],[],[]\n for result in results[:show_top]:\n \n terms.append(\n compact_string(result['term'])\n ) \n genes.append(' '.join(result['genes'][:max_genes]))\n pvals.append(-np.log10(result['pvalue']))\n \n ax.barh(np.arange(len(terms)), pvals, color=barcolor)\n ax.set_yticks(np.arange(len(terms)))\n ax.set_yticklabels(terms)\n ax.invert_yaxis()\n ax.set(title = ontology, xlabel = '-log10 pvalue')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n \n if show_genes:\n for j, p in enumerate(ax.patches):\n _y = p.get_y() + p.get_height() - p.get_height()/3\n ax.text(0.1, _y, compact_string(genes[j], max_wordlen=10, join_spacer = ', '), ha=\"left\", color = text_color)\n\n\n def plot_enrichments(self, enrichment_results, show_genes = True, show_top = 5, barcolor = 'lightgrey',\n text_color = 'black', return_fig = False, enrichments_per_row = 2, height = 4, aspect = 2.5, max_genes = 15):\n '''\n Make plot of geneset enrichments given results from ``get_ontology`` or ``get_enrichments``.\n\n Example:\n\n post_id = expr_model.post_genelist(0) #post top 250 module 0 genes\n enrichments = expr_model.get_enrichments(post_id)\n expr_model.plot_enrichments(enrichments)\n\n Args:\n enrichment_results (dict): output from ``get_ontology`` or ``get_enrichments``\n show_genes (bool): overlay gene names on top of bars\n show_top (int): plot top n enrichment results\n barcolor (color): color of barplot bars\n text_color (text_color): color of text on barplot bars\n return_fig (bool): return fig and axes objects\n enrichments_per_row (int): number of plots per row\n height (float): height of each plot\n aspect (float): multiplier for width of each plot, width = aspect * height\n max_genes (int): maximum number of genes to display on bar\n\n Returns (if return_fig is True):\n matplotlib.figure, matplotlib.axes.Axes\n\n '''\n \n func = partial(self._enrichment_plot, text_color = text_color, \n show_top = show_top, barcolor = barcolor, show_genes = show_genes, max_genes = max_genes)\n\n fig, ax = map_plot(func, enrichment_results.keys(), enrichment_results.values(), plots_per_row = enrichments_per_row, \n height =height, aspect = aspect) \n \n plt.tight_layout()\n if return_fig:\n return fig, ax","repo_name":"AllenWLynch/Kladi","sub_path":"kladi/matrix_models/expression_model.py","file_name":"expression_model.py","file_ext":"py","file_size_in_byte":20162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
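The `theta` sample in the pyro model above replaces a Dirichlet prior over topic proportions with a log-normal, the usual Laplace-approximation trick from ProdLDA-style topic models. Assuming `_get_prior_mu` and `_get_prior_std` follow that standard construction (their definitions are outside this excerpt, so this is an inference, not a quote), the prior parameters for K topics with concentration vector \alpha would be:

\mu_k = \log \alpha_k - \frac{1}{K} \sum_{i=1}^{K} \log \alpha_i, \qquad \sigma_k^2 = \frac{1}{\alpha_k}\left(1 - \frac{2}{K}\right) + \frac{1}{K^2} \sum_{i=1}^{K} \frac{1}{\alpha_i}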
+{"seq_id":"7471928919","text":"from __future__ import print_function\nimport numpy as np\nimport yt\nimport powderday.config as cfg\n\nfrom yt.data_objects.particle_filters import add_particle_filter\n\n\ndef enzo_field_add(fname,ds = None, starages = False):\n\n def _starmetals(field,data):\n return data[('newstars','metallicity_fraction')]\n\n def _starcoordinates(field,data):\n #set units of cm then tack them back on because the column_stack loses them\n xpos = data[ ('newstars', 'particle_position_x')].in_units(\"cm\")\n ypos = data[ ('newstars', 'particle_position_y')].in_units(\"cm\")\n zpos = data[ ('newstars', 'particle_position_z')].in_units(\"cm\")\n coordinates = np.column_stack((xpos,ypos,zpos))\n coordinates = data.ds.arr(coordinates,\"cm\")\n return coordinates\n\n def _stellarages(field,data):\n age = ds.current_time.in_units('Gyr')-data[('newstars', 'creation_time')].in_units('Gyr')\n age[np.where(age < 1.e-3)[0]] = 1.e-3\n return age\n\n def _starmasses(field,data):\n return data[('newstars', 'particle_mass')]\n\n def _gasdensity(field,data):\n return data[('gas', 'density')]\n \n def _gasmetals(field,data):\n return data[ ('gas', 'metallicity')]\n\n def _gasmasses(field,data):\n return data[('gas','cell_mass')]\n\n def _gasfh2(field, data):\n try: return data[('gas', 'FractionH2')]\n except: return data[('gas', 'metallicity')]*0. #just some dimensionless array\n \n\n #load the ds\n if fname != None:\n ds = yt.load(fname)\n ds.index\n\n #set up particle_filters to figure out which particles are stars.\n #we'll call particles that have ages > 0 stars.\n\n def newstars(pfilter,data):\n age = data[pfilter.filtered_type,\"creation_time\"]\n filter = age.in_units('Gyr') > 0\n return filter\n\n\n \n add_particle_filter(\"newstars\",function=newstars,filtered_type='all',requires=[\"creation_time\"])\n ds.add_particle_filter(\"newstars\")\n ad = ds.all_data()\n\n\n\n\n ds.add_field(('star','metals'),function=_starmetals,units=\"code_metallicity\",sampling_type='particle')\n ds.add_field(('star','coordinates'),function=_starcoordinates,units=\"cm\",sampling_type='particle')\n ds.add_field(('stellar','ages'),function=_stellarages,units='Gyr',sampling_type='particle')\n ds.add_field(('star','masses'),function=_starmasses,units='g',sampling_type='particle')\n ds.add_field(('gas','density'),function=_gasdensity,units='g/cm**3',sampling_type='cell')\n ds.add_field(('gas','metals'),function=_gasmetals,units=\"code_metallicity\",sampling_type='cell')\n ds.add_field(('gas','fh2'),function=_gasfh2,units='dimensionless',sampling_type='cell')\n ds.add_field(('gas','masses'),function=_gasmasses,units='g',sampling_type='cell')\n \n ad = ds.all_data()\n\n return ds\n","repo_name":"dnarayanan/powderday","sub_path":"powderday/front_ends/enzo2pd.py","file_name":"enzo2pd.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"}
+{"seq_id":"12615235725","text":"class Solution:\n def maxArea(self, height: List[int]) -> int:\n lp = 0\n rp = len(height) - 1\n max_area = 0\n\n\n while lp < rp:\n left_height = height[lp]\n right_height = height[rp]\n width = rp - lp\n if left_height < right_height:\n area = left_height * width \n max_area = max(max_area, area)\n lp += 1\n elif left_height >= right_height:\n area = right_height * width\n max_area = max(max_area, area)\n rp -= 1\n return max_area\n \n\n ","repo_name":"Elliott-Chong/LeetCode","sub_path":"12-Container-With-Most-Water/solution-1.py","file_name":"solution-1.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}
+{"seq_id":"13504412445","text":"import time\nimport pigpio\nimport pygame\nfrom adafruit_servokit import ServoKit\n\npi = pigpio.pi()\n \npygame.init()\n \n# Loop until the user clicks the close button.\ndone = False\n \n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n \n# Initialize the joysticks\nj = pygame.joystick.Joystick(0)\nj.init()\n\nkit = ServoKit(channels=8)\n\nmotor = 4\n\nwhile not done:\n # EVENT PROCESSING STEP\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n if event.type == pygame.JOYBUTTONDOWN:\n if j.get_button(8):\n pi.set_servo_pulsewidth(4, 1000) # off\n time.sleep(3) \n print(event.button)\n if j.get_button(9):\n pi.set_servo_pulsewidth(4, 1500) # 50% power\n time.sleep(3) \n print(event.button)\n if j.get_button(0):\n pi.set_servo_pulsewidth(4, 1800) # 80% power\n time.sleep(3) \n print(event.button)\n \n if j.get_axis(2):\n kit.servo[1].angle = 0 \n print(event.axis)\n if event.type == pygame.JOYBUTTONUP:\n print(\"Joystick button released.\")\n \n if event.type == pygame.JOYAXISMOTION:\n if j.get_axis(2):\n kit.servo[0].angle = 0 \n print(event.axis)\n if j.get_axis(2):\n kit.servo[0].angle = 0 \n print(event.axis)\npi.stop()\n","repo_name":"nigel-otieno/RPI_Plane-test","sub_path":"brushless_motor_test.py","file_name":"brushless_motor_test.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3869776640","text":"#!/usr/bin/python3\n\"\"\"\nnew Class that inherits from rectangle\n\"\"\"\nfrom models.rectangle import Rectangle\n\n\nclass Square(Rectangle):\n \"\"\"\n this inherits from rectangle\n \"\"\"\n def __init__(self, size, x=0, y=0, id=None):\n super().__init__(size, size, x, y, id)\n\n def __str__(s):\n return (\n f\"[Square] ({s.id}) {s.x}/{s.y} - {s.width}\"\n )\n\n @property\n def size(self):\n \"\"\"get size\"\"\"\n return self.width\n\n @size.setter\n def size(self, value):\n \"\"\"set size to width and height\"\"\"\n self.width = value\n self.height = value\n\n def update(self, *args, **kwargs):\n \"\"\"function to assing newly attrs\"\"\"\n i = 0\n if args is not None and len(args) != 0:\n for arg in args:\n if i == 0:\n self.id = arg\n if i == 1:\n self.size = arg\n if i == 2:\n self.x = arg\n if i == 3:\n self.y = arg\n i += 1\n else:\n if kwargs is not None and len(kwargs) != 0:\n for key, value in kwargs.items():\n if key == \"id\":\n self.id = value\n if key == \"size\":\n self.width = value\n if key == \"x\":\n self.x = value\n if key == \"y\":\n self.y = value\n\n def to_dictionary(self):\n \"\"\"__dict__ representation of Square\"\"\"\n return {\"id\": self.id, \"size\": self.width, \"x\": self.x, \"y\": self.y}\n","repo_name":"sanei1509/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"17133155291","text":"import glob\nimport os\n\nimport gym\nfrom gym import error, spaces\nfrom gym import monitoring\nfrom gym.monitoring import monitor\nfrom gym.monitoring.tests import helpers\n\nclass FakeEnv(gym.Env):\n def _render(self, close=True):\n raise RuntimeError('Raising')\n\ndef test_monitor_filename():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp)\n env.monitor.close()\n\n manifests = glob.glob(os.path.join(temp, '*.manifest.*'))\n assert len(manifests) == 1\n\ndef test_write_upon_reset_false():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp, video_callable=False, write_upon_reset=False)\n env.reset()\n\n files = glob.glob(os.path.join(temp, '*'))\n assert not files, \"Files: {}\".format(files)\n\n env.monitor.close()\n files = glob.glob(os.path.join(temp, '*'))\n assert len(files) > 0\n\ndef test_write_upon_reset_true():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp, video_callable=False, write_upon_reset=True)\n env.reset()\n\n files = glob.glob(os.path.join(temp, '*'))\n assert len(files) > 0, \"Files: {}\".format(files)\n\n env.monitor.close()\n files = glob.glob(os.path.join(temp, '*'))\n assert len(files) > 0\n\ndef test_close_monitor():\n with helpers.tempdir() as temp:\n env = FakeEnv()\n env.monitor.start(temp)\n env.monitor.close()\n\n manifests = monitor.detect_training_manifests(temp)\n assert len(manifests) == 1\n\ndef test_video_callable_true_not_allowed():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n try:\n env.monitor.start(temp, video_callable=True)\n except error.Error:\n pass\n else:\n assert False\n\ndef test_video_callable_false_does_not_record():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp, video_callable=False)\n env.reset()\n env.monitor.close()\n results = monitoring.load_results(temp)\n assert len(results['videos']) == 0\n\ndef test_video_callable_records_videos():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp)\n env.reset()\n env.monitor.close()\n results = monitoring.load_results(temp)\n assert len(results['videos']) == 1, \"Videos: {}\".format(results['videos'])\n\ndef test_env_reuse():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp)\n env.monitor.close()\n\n env.monitor.start(temp, force=True)\n env.reset()\n env.step(env.action_space.sample())\n env.step(env.action_space.sample())\n env.monitor.close()\n\n results = monitor.load_results(temp)\n assert results['episode_lengths'] == [2], 'Results: {}'.format(results)\n\nclass AutoresetEnv(gym.Env):\n metadata = {'semantics.autoreset': True}\n\n def __init__(self):\n self.action_space = spaces.Discrete(1)\n self.observation_space = spaces.Discrete(1)\n\n def _reset(self):\n return 0\n\n def _step(self, action):\n return 0, 0, False, {}\n\ngym.envs.register(\n id='Autoreset-v0',\n entry_point='gym.monitoring.tests.test_monitor:AutoresetEnv',\n timestep_limit=2,\n)\ndef test_env_reuse():\n with helpers.tempdir() as temp:\n env = gym.make('Autoreset-v0')\n env.monitor.start(temp)\n\n env.reset()\n\n env.step(None)\n _, _, done, _ = env.step(None)\n assert done\n\n env.step(None)\n _, _, done, _ = env.step(None)\n assert done\n\ndef test_no_monitor_reset_unless_done():\n def assert_reset_raises(env):\n errored = False\n try:\n env.reset()\n except error.Error:\n errored = True\n assert errored, \"Env 
allowed a reset when it shouldn't have\"\n\n with helpers.tempdir() as temp:\n # Make sure we can reset as we please without monitor\n env = gym.make('CartPole-v0')\n env.reset()\n env.step(env.action_space.sample())\n env.step(env.action_space.sample())\n env.reset()\n\n # can reset once as soon as we start\n env.monitor.start(temp, video_callable=False)\n env.reset()\n assert_reset_raises(env)\n\n env.step(env.action_space.sample())\n env.step(env.action_space.sample())\n assert_reset_raises(env)\n\n # should allow resets after the episode is done\n d = False\n while not d:\n _, _, d, _ = env.step(env.action_space.sample())\n\n env.reset()\n\n env.step(env.action_space.sample())\n assert_reset_raises(env)\n\n env.monitor.close()\n\ndef test_only_complete_episodes_written():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n\n env.monitor.start(temp, video_callable=False)\n env.reset()\n d = False\n while not d:\n _, _, d, _ = env.step(env.action_space.sample())\n\n env.reset()\n env.step(env.action_space.sample())\n\n env.monitor.close()\n\n # Only 1 episode should be written\n results = monitoring.load_results(temp)\n assert len(results['episode_lengths']) == 1, \"Found {} episodes written; expecting 1\".format(len(results['episode_lengths']))\n","repo_name":"wyndwarrior/imitation_from_observation","sub_path":"gym/monitoring/tests/test_monitor.py","file_name":"test_monitor.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"72"}
+{"seq_id":"23007908067","text":"\"\"\" \n\nMetrobus client server code \n\nThis module act as a standalone client, querying the \nselected node for recharges. This runs on a Le Potato\n\nFor research purposes and hardware limitations, this server\nqueries to the DTN CLIENT RECHARGE server for the available nodes \ndirections. \n\nHowever, once having that information. To check if a node\nhas a specific recharge, the server will use that information \nto then query to the INDIVIDUAL NODE AGENT SERVER. \n\nOnce queried, if, there's a recharge, then this server enables\nRFID module to write the new recharge status in the tag. \n\n\"\"\"\n\n\nfrom flask import Flask, render_template, request\nimport requests\nimport json\nfrom uuid import uuid4\nimport os\nfrom flask_sock import Sock\n\nimport binascii\nimport time\n\nfrom pn532pi import Pn532, pn532\nfrom pn532pi import Pn532I2c\n\n#set the communication interface to I2C\nPN532_I2C = Pn532I2c(1)\nnfc = Pn532(PN532_I2C)\n\n\n#search for pn532 chipset\ndef setup():\n print(\"-------Looking for PN532--------\")\n\n nfc.begin()\n\n versiondata = nfc.getFirmwareVersion()\n if not versiondata:\n print(\"Didn't find PN53x board\")\n raise RuntimeError(\"Didn't find PN53x board\") # halt\n\n # Got ok data, print it out!\n print(\"Found chip PN5 {:#x} Firmware ver. {:d}.{:d}\".format((versiondata >> 24) & 0xFF, (versiondata >> 16) & 0xFF,\n (versiondata >> 8) & 0xFF))\n\n # configure board to read RFID tags\n nfc.SAMConfig()\n\n\nDTN_CLIENT_RECHARGE_SERVER_ADDRESS = os.getenv(\n \"DTN_CLIENT_RECHARGE_SERVER_ADDRESS\", \"http://localhost:300'\"\n)\n\n#performs the operation of writing and reading the data in the card\ndef loop():\n\n # Wait for an ISO14443A type card (Mifare, etc.). When one is found\n # 'uid' will be populated with the UID, and uidLength will indicate\n # if the uid is 4 bytes (Mifare Classic) or 7 bytes (Mifare Ultralight)\n success, uid = nfc.readPassiveTargetID(cardbaudrate=pn532.PN532_MIFARE_ISO14443A_106KBPS)\n\n if (success):\n # Display some basic information about the card\n print(\"Found an ISO14443A card\")\n print(\"UID Length: {:d}\".format(len(uid)))\n print(\"UID Value: {}\".format(binascii.hexlify(uid)))\n\n # Make sure this is a Mifare Classic card\n if (len(uid) != 4):\n print(\"Ooops ... 
this doesn't seem to be a Mifare Classic card!\")\n return\n\n # We probably have a Mifare Classic card ...\n print(\"Seems to be a Mifare Classic card (4 byte UID)\")\n\n\napp = Flask(__name__, template_folder=\".\")\nsock = Sock(app)\nsock.init_app(app)\n\n\navailableNodes = []\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\ndef searchNodeByName(name, nodeSet):\n return [element for element in nodeSet if element[\"name\"] == name] or None\n\n\n@sock.route(\"/ws\")\ndef echo(sock):\n while True:\n data = json.loads(sock.receive())\n\n node = data[\"value\"]\n\n sock.send(\n json.dumps(\n {\n \"type\": \"status\",\n \"value\": \"Fetching the recharges in the selected Metro bus\",\n }\n )\n )\n\n ##fetch recharges from node\n nodeAgentServer = searchNodeByName(node, availableNodes)[0]\n\n recharges = requests.get(nodeAgentServer[\"agentIP\"] + \"/get-recharges\").json()\n\n sock.send(\n json.dumps(\n {\n \"type\": \"status\",\n \"value\": \"Please approach the card into the NFC device...\",\n }\n )\n )\n\n # wait for nfc reader to read\n\n time.sleep(2)\n\n sock.send(\n json.dumps(\n {\n \"type\": \"status\",\n \"value\": \"Updating balance in the card\",\n }\n )\n )\n\n time.sleep(1)\n\n sock.send(\n json.dumps(\n {\n \"type\": \"status-complete\",\n \"value\": \"Balance updated, CARD BALANCE: \",\n }\n )\n )\n\n # download the data into the card\n\n sock.send(\n json.dumps(\n {\n \"type\": \"recharges\",\n \"value\": json.dumps(recharges),\n }\n )\n )\n\n \"\"\" match data.type:\n case \"new-selected-node\":\n sock.send(\n json.dumps(\n {\n \"type\": \"status\",\n \"value\": \"Please approach the card into the NFC device...\",\n }\n )\n ) \"\"\"\n\n # sock.send(data)\n\n\n# TO DO, SET UP SERIAL PORT HEARING\n\n\n@app.route(\"/get-nodes\", methods=[\"GET\"])\ndef getNodes():\n data = requests.get(\"http://localhost:3000/get-nodes\").json()\n\n global availableNodes\n\n availableNodes = data[:]\n\n return availableNodes\n\n\nif __name__ == \"__main__\":\n setup()\n app.run(host=\"0.0.0.0\", port=3001, debug=True)\n # app[\"TEMPLATES_AUTO_RELOAD\"] = True\n","repo_name":"Rulios/dtn-public-transport","sub_path":"metrobus-client/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16935601780","text":"import torch\nimport torch.nn as nn\n\n\nclass Metric(nn.Module):\n def __init__(self):\n super(Metric, self).__init__()\n self.metric = None\n\n def forward(self, pred, target):\n return self.metric(pred, target)\n\n\nclass DiceMetric(nn.Module):\n def __init__(self):\n super(DiceMetric, self).__init__()\n\n def forward(self, pred, target):\n \"\"\"calc dice\n\n Args:\n pred (torch.tensor): (N, H, W)\n target (torch.tensor): (N, H, W)\n\n Returns:\n (torch.tensor): dice\n \"\"\"\n\n pred = pred.float()\n target = target.float()\n smooth = 1e-4\n\n p = torch.sigmoid(pred) > 0.5\n t = target > 0.5\n\n inter = (t*p).sum(dim=2).sum(dim=1).float()\n dim1 = (p).sum(dim=2).sum(dim=1).float()\n dim2 = (t).sum(dim=2).sum(dim=1).float()\n\n coeff = (2 * inter + smooth) / (dim1 + dim2 + smooth)\n dice_total = 1-coeff.sum(dim=0)/coeff.size(0)\n return dice_total\n","repo_name":"gunjunlee/pytorch-YOLO-v1","sub_path":"metric/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"}
+{"seq_id":"42075128502","text":"import traceback\nfrom binance.client import Client\nfrom datetime import datetime\nfrom math import log\n\nfrom BL.BinanceWebSocket import BinanceWebSocket\nfrom Common.Binance.AccountInfo import AccountInfo\nfrom Common.Binance.Balance import Balance\nfrom Common.Binance.Trade import Trade\nfrom Common.Binance.Candle import Candle\nfrom Common.Binance.OrderBookTicker import OrderBookTicker\nfrom Common.Binance.PriceTicker import PriceTicker\nfrom Common.Binance.PriceTicker24 import PriceTicker24\nfrom Common.Market import Market\nfrom Common.QcParameters import QcParameters\n\n\nclass BinanceLibrary:\n url_base = \"https://api.binance.com/api/v3/klines?symbol={}&interval={}&limit={}\"\n\n def __init__(self, api_key, api_secret):\n try:\n self.client = Client(api_key, api_secret)\n self.ws = None\n except:\n print(traceback.format_exc())\n\n def exit(self):\n try:\n if self.ws:\n self.ws.exit()\n except:\n print(traceback.format_exc())\n\n def get_candles(self, symbol, interval, limit=500, start_time=None, end_time=None):\n candles = None\n try:\n data = self.client.get_klines(symbol=symbol, interval=interval, limit=limit, startTime=start_time, endTime=end_time)\n candles = [Candle(symbol,interval,*d) for d in data]\n except:\n print(traceback.format_exc())\n return candles\n\n def get_markets(self, quote_assets):\n markets = []\n try:\n symbols = self.client.get_exchange_info()['symbols']\n #print(*symbols,sep='\\n')\n for symbol in symbols:\n if symbol['status'] != 'TRADING':\n continue\n if not quote_assets or (symbol['quoteAsset'] in quote_assets):\n market = Market(Symbol=symbol['symbol'], BaseAsset=symbol['baseAsset'], QuoteAsset=symbol['quoteAsset'])\n if symbol['filters']:\n for filter in symbol['filters']:\n if filter['filterType'] == 'MIN_NOTIONAL':\n market.MinAmountToTrade = float(filter['minNotional'])\n if filter['filterType'] == 'LOT_SIZE':\n market.AmountDecimalDigits = round(-log(float(filter['stepSize']),10))\n market.MinQuantity = float(filter['minQty'])\n markets.append(market)\n except:\n print(traceback.format_exc())\n return markets\n\n def get_account_info(self):\n info = None\n try:\n info = AccountInfo(*self.client.get_account(recvWindow=59000).values())\n except:\n print(traceback.format_exc())\n return info\n\n def get_asset_balance(self, asset):\n balance = None\n try:\n balance = Balance(*self.client.get_asset_balance(asset=asset, recvWindow=59000).values())\n except:\n print(traceback.format_exc())\n return balance\n\n def get_server_time(self):\n server_time = None\n try:\n server_time = int(self.client.get_server_time()[\"serverTime\"])\n except:\n print(traceback.format_exc())\n return server_time\n\n def get_orderbook(self):\n order_book = None\n try:\n order_book = [OrderBookTicker(*ticker) for ticker in self.client.get_orderbook_tickers()]\n except:\n print(traceback.format_exc())\n return order_book\n\n def get_price_ticker(self, symbol):\n price_ticker = None\n try:\n result = self.client.get_symbol_ticker(symbol=symbol)\n price_ticker = PriceTicker(*result.values())\n except:\n print(traceback.format_exc())\n return price_ticker\n\n def get_price_ticker_24(self, symbol):\n price_ticker_24 = None\n try:\n result = self.client.get_ticker(symbol=symbol)\n price_ticker_24 = PriceTicker24(*result.values())\n except:\n print(traceback.format_exc())\n return price_ticker_24\n\n def get_trades(self, symbol):\n trades = None\n try:\n result = self.client.get_my_trades(symbol=symbol, recvWindow=59000)\n trades = 
[Trade(*trade.values()) for trade in result]\n except:\n print(traceback.format_exc())\n return trades\n\n def buy_asset(self, symbol, amount):\n try:\n return self.client.order_market_buy(\n symbol=symbol,\n quantity=amount, recvWindow=59000)\n except:\n print(traceback.format_exc())\n\n def sell_asset(self, symbol, amount):\n try:\n return self.client.order_market_sell(\n symbol=symbol,\n quantity=amount, recvWindow=59000)\n except:\n print(traceback.format_exc())\n\n def __convert_to_stream_names(self, symbols, interval):\n try:\n stream_names = []\n for symbol in symbols:\n stream_names.append(\"{}@kline_{}\".format(symbol.lower(), interval))\n return stream_names\n except:\n print(traceback.format_exc())\n\n ''' \n input:\n inverval: str\n markets: list of market objects\n '''\n def start_web_socket(self, symbols, interval, callback):\n try:\n print(\"start_web_socket\")\n streams = self.__convert_to_stream_names(symbols, interval)\n self.ws = BinanceWebSocket(self.client)\n self.ws.start(streams, callback)\n except:\n print(traceback.format_exc())\n\n def stop_web_socket(self):\n try:\n if self.ws:\n self.ws.stop()\n except:\n print(traceback.format_exc())\n\n\n","repo_name":"Senior-Develop/SWT-PYQT-APP","sub_path":"BL/BinanceLibrary.py","file_name":"BinanceLibrary.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
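A minimal call sequence for BinanceLibrary above; the credential strings and symbol are placeholders, and since every method swallows exceptions and returns None on failure, callers should guard for that:

lib = BinanceLibrary("YOUR_API_KEY", "YOUR_API_SECRET")  # placeholder keys
candles = lib.get_candles("BTCUSDT", "1h", limit=5)
for candle in candles or []:        # get_candles returns None on error
    print(candle)
lib.exit()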
+{"seq_id":"29275005602","text":"\"\"\"\nThis module is used to solve the count_case problem in a string from the\nfunctions and methods homework. This solution uses a for loop and islower\nfunction to count the case of the letters in the sentence\n\"\"\"\n\n# Author: Marvin DaCosta, Created June 27, 2020, Last Modified: June 28, 2020\n\n# YouTube video on how to use collections in python:\n# https://bit.ly/2NBaCV6\n\n# Create a sample string to be used in function\nSAMPLE_STRING = 'Hello Mr. Rogers, how are you this fine Tuesday?'\n\n\n# Create a function to loop through each character in the string\ndef count_loop(the_string):\n \"\"\"\n This function is used to loop through each charcter in the string and\n count each lower and upper case character and then return the count for each\n \"\"\"\n count_u = 0\n count_l = 0\n for case in the_string:\n if case.isupper():\n count_u += 1\n elif case.islower():\n count_l += 1\n return count_l, count_u\n\n\nlower, upper = count_loop(SAMPLE_STRING)\nprint(\"There are\", upper, \"upper case and \", lower, \\\n \"lower case characters in the string\")\n","repo_name":"BornRiot/Python.Udemy.Complete_Python_BootCamp","sub_path":"methods_and_functions/functions_and_methods_hw/count_case.py","file_name":"count_case.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"19951645732","text":"from contextlib import contextmanager\nimport datetime as dt\nfrom functools import partial\nimport json\nfrom typing import List, Callable, Dict, Any, Union, Optional\nfrom uuid import UUID\n\n\nfrom fastapi import Depends, HTTPException\nimport pandas as pd\nimport pymysql\nfrom pymysql import converters\nimport pytz\nfrom sqlalchemy.engine import create_engine # type: ignore\nfrom sqlalchemy.pool import QueuePool # type: ignore\n\n\nfrom . import settings, models, __version__\nfrom .auth import get_user_id\n\n\n# this is faster than using strftime\nTIMEFORMAT = \"'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}'\" # NOQA\n\n\ndef escape_timestamp(value, mapping=None):\n # adapted from the SolarForecastArbiter API under the above MIT license\n if value.tzinfo is not None:\n return TIMEFORMAT.format(value.tz_convert(\"UTC\"))\n else:\n return TIMEFORMAT.format(value)\n\n\ndef escape_datetime(value, mapping=None):\n # adapted from the SolarForecastArbiter API under the above MIT license\n if value.tzinfo is not None:\n return TIMEFORMAT.format(value.astimezone(dt.timezone.utc))\n else:\n return TIMEFORMAT.format(value)\n\n\ndef convert_datetime_utc(obj):\n # adapted from the SolarForecastArbiter API under the above MIT license\n unlocalized = converters.convert_datetime(obj)\n return pytz.utc.localize(unlocalized)\n\n\ndef _make_sql_connection_partial(\n host=None, port=None, user=None, password=None, database=None\n):\n # adapted from the SolarForecastArbiter API under the above MIT license\n conv = converters.conversions.copy()\n # either convert decimals to floats, or add decimals to schema\n conv[converters.FIELD_TYPE.DECIMAL] = float\n conv[converters.FIELD_TYPE.NEWDECIMAL] = float\n conv[converters.FIELD_TYPE.TIMESTAMP] = convert_datetime_utc\n conv[converters.FIELD_TYPE.DATETIME] = convert_datetime_utc\n conv[converters.FIELD_TYPE.JSON] = json.loads\n conv[UUID] = converters.escape_str\n conv[pd.Timestamp] = escape_timestamp\n conv[dt.datetime] = escape_datetime\n connect_kwargs = {\n \"host\": host or settings.mysql_host,\n \"port\": port or settings.mysql_port,\n \"user\": user or settings.mysql_user,\n \"password\": password or settings.mysql_password,\n \"database\": database or settings.mysql_database,\n \"binary_prefix\": True,\n \"conv\": conv,\n \"use_unicode\": True,\n \"charset\": \"utf8mb4\",\n \"init_command\": \"SET time_zone = '+00:00'\",\n }\n if settings.mysql_use_ssl:\n connect_kwargs[\"ssl\"] = {\"ssl\": True}\n getconn = partial(pymysql.connect, **connect_kwargs)\n return getconn\n\n\nengine = create_engine(\n \"mysql+pymysql://\",\n creator=_make_sql_connection_partial(),\n poolclass=QueuePool,\n pool_recycle=3600,\n pool_pre_ping=True,\n).pool\n\n\ndef ensure_user_exists(f: Callable) -> Callable:\n \"\"\"Decorator that ensures the DB user exists for the current auth0 ID.\n Only necessary on methods that require an existing user like create_*.\n \"\"\"\n\n def wrapper(cls, *args, **kwargs):\n cls.create_user_if_not_exists()\n return f(cls, *args, **kwargs)\n\n return wrapper\n\n\nclass StorageTransactionError(Exception):\n \"\"\"Errors raised in StorageInterface from missing method calls needed\n to complete a transaction\"\"\"\n\n pass\n\n\nclass StorageInterface:\n def __init__(self, user: str = Depends(get_user_id)):\n self.user = user\n self._cursor = None\n self.commit = True\n\n @property\n def cursor(self):\n if self._cursor is None:\n raise AttributeError(\"Cursor is only available within 
`start_transaction`\")\n return self._cursor\n\n @contextmanager\n def start_transaction(self):\n connection = engine.connect()\n cursor = connection.cursor(cursor=pymysql.cursors.DictCursor)\n self._cursor = cursor\n self._add_job_result_called = False\n self._final_job_status_set = False\n try:\n yield self\n except Exception:\n connection.rollback()\n raise\n else:\n if self.commit:\n connection.commit()\n finally:\n connection.close()\n self._cursor = None\n\n def try_query(self, query, args):\n # adapted from the SolarForecastArbiter API under the above MIT license\n try:\n self.cursor.execute(query, args)\n except (\n pymysql.err.OperationalError,\n pymysql.err.IntegrityError,\n pymysql.err.InternalError,\n pymysql.err.DataError,\n ) as err:\n ecode = err.args[0]\n msg = err.args[1]\n if ecode == 1142:\n raise HTTPException(status_code=404, detail=msg)\n elif ecode == 1062 or ecode == 1348:\n raise HTTPException(status_code=409, detail=msg)\n elif ecode == 3140 or ecode == 1406 or ecode == 1048 or ecode == 1054:\n raise HTTPException(status_code=400, detail=msg)\n else:\n raise\n\n def _call_procedure(\n self,\n procedure_name: str,\n *args,\n with_current_user: bool = True,\n ) -> dict:\n \"\"\"\n Can't user callproc since it doesn't properly use converters.\n Will not handle OUT or INOUT parameters without first setting\n local variables and retrieving from those variables\n \"\"\"\n # adapted from the SolarForecastArbiter API under the above MIT license\n if with_current_user:\n new_args = (self.user, *args)\n else:\n new_args = args\n query = f'CALL {procedure_name}({\",\".join([\"%s\"] * len(new_args))})'\n self.try_query(query, new_args)\n out: dict = self.cursor.fetchall()\n return out\n\n def _call_procedure_for_single(\n self,\n procedure_name: str,\n *args,\n with_current_user: bool = True,\n ) -> dict:\n \"\"\"Wrapper handling try/except logic when a single value is expected\"\"\"\n # adapted from the SolarForecastArbiter API under the above MIT license\n try:\n result: dict = self._call_procedure(\n procedure_name,\n *args,\n with_current_user=with_current_user,\n )[0]\n except IndexError:\n raise HTTPException(status_code=404)\n return result\n\n def create_user_if_not_exists(self) -> str:\n out: str = self._call_procedure_for_single(\"create_user_if_not_exists\")[\n \"user_id\"\n ]\n return out\n\n @ensure_user_exists\n def get_user(self) -> models.UserInfo:\n out = self._call_procedure_for_single(\"get_user\")\n out[\"object_id\"] = out.pop(\"user_id\")\n out[\"object_type\"] = \"user\"\n out[\"modified_at\"] = out[\"created_at\"]\n return models.UserInfo(**out)\n\n def _parse_system(self, sys: Dict[str, Any]) -> models.StoredPVSystem:\n sys[\"object_id\"] = sys.pop(\"system_id\")\n sys[\"object_type\"] = \"system\"\n return models.StoredPVSystem(**sys)\n\n def list_systems(self) -> List[models.StoredPVSystem]:\n systems = self._call_procedure(\"list_systems\")\n out = []\n for sys in systems:\n out.append(self._parse_system(sys))\n return out\n\n @ensure_user_exists\n def create_system(self, system_def: models.PVSystem) -> models.StoredObjectID:\n created = self._call_procedure_for_single(\n \"create_system\", system_def.name, system_def.json()\n )\n return models.StoredObjectID(\n object_id=created[\"system_id\"], object_type=\"system\"\n )\n\n def get_system(self, system_id: UUID) -> models.StoredPVSystem:\n system = self._call_procedure_for_single(\"get_system\", system_id)\n return self._parse_system(system)\n\n def delete_system(self, system_id: UUID):\n 
self._call_procedure(\"delete_system\", system_id)\n\n def update_system(\n self, system_id: UUID, system_def: models.PVSystem\n ) -> models.StoredObjectID:\n self._call_procedure(\n \"update_system\", system_id, system_def.name, system_def.json()\n )\n return models.StoredObjectID(object_id=system_id, object_type=\"system\")\n\n def get_system_hash(self, system_id: UUID) -> str:\n out: str = self._call_procedure_for_single(\"get_system_hash\", system_id)[\n \"system_hash\"\n ]\n return out\n\n @ensure_user_exists\n def create_system_model_data(self, system_id: UUID, dataset: models.DatasetEnum):\n self._call_procedure(\"create_system_data\", system_id, dataset)\n\n def get_system_model_meta(\n self, system_id: UUID, dataset: models.DatasetEnum\n ) -> models.SystemDataMeta:\n out = self._call_procedure_for_single(\n \"get_system_data_meta\", system_id, dataset\n )\n stored_hash = out.pop(\"system_hash\")\n if stored_hash is not None:\n current_hash = self.get_system_hash(system_id)\n out[\"system_modified\"] = stored_hash.lower() != current_hash\n else:\n out[\"system_modified\"] = False\n # present \"prepared\" status as \"queued\"\n if out[\"status\"] == \"prepared\":\n out[\"status\"] = \"queued\"\n return models.SystemDataMeta(**out)\n\n def update_system_model_data(\n self,\n system_id: UUID,\n dataset: models.DatasetEnum,\n system_hash: str,\n timeseries_data: Optional[bytes],\n statistics: Optional[bytes],\n error: Union[dict, List[dict]] = [],\n ):\n self._call_procedure(\n \"update_system_data\",\n system_id,\n dataset,\n timeseries_data,\n statistics,\n json.dumps(error),\n __version__,\n system_hash,\n )\n\n def get_system_model_timeseries(\n self, system_id: UUID, dataset: models.DatasetEnum\n ) -> bytes:\n res = self._call_procedure_for_single(\n \"get_system_timeseries\", system_id, dataset\n )\n if res[\"timeseries\"] is None:\n raise HTTPException(status_code=404, detail=\"No timeseries data available\")\n out: bytes = res[\"timeseries\"]\n return out\n\n def get_system_model_statistics(\n self, system_id: UUID, dataset: models.DatasetEnum\n ) -> bytes:\n res = self._call_procedure_for_single(\n \"get_system_statistics\", system_id, dataset\n )\n if res[\"statistics\"] is None:\n raise HTTPException(status_code=404, detail=\"No statistics available\")\n out: bytes = res[\"statistics\"]\n return out\n\n @ensure_user_exists\n def create_system_group(self, name: str):\n created = self._call_procedure_for_single(\"create_system_group\", name)\n return models.StoredObjectID(\n object_id=created[\"group_id\"], object_type=\"system_group\"\n )\n\n def update_system_group(self, group_id: UUID, name: str):\n self._call_procedure(\"update_system_group\", group_id, name)\n return models.StoredObjectID(object_id=group_id, object_type=\"system_group\")\n\n def delete_system_group(self, group_id: UUID):\n self._call_procedure(\"delete_system_group\", group_id)\n\n def _parse_system_group(self, group, group_systems=None):\n definition = {\"name\": group.pop(\"name\")}\n if group_systems is not None:\n # systems are an optional field, so that when we're listing\n # groups, we don't have to make so many calls\n systems = [self._parse_system(sys) for sys in group_systems]\n definition[\"systems\"] = systems\n group[\"object_id\"] = group.pop(\"group_id\")\n group[\"object_type\"] = \"system_group\"\n group[\"definition\"] = definition\n return models.StoredSystemGroup(**group)\n\n def get_system_group(self, group_id: UUID):\n group = self._call_procedure_for_single(\"get_system_group\", 
group_id)\n group_systems = self._call_procedure(\"get_group_systems\", group_id)\n return self._parse_system_group(group, group_systems)\n\n def list_system_groups(self):\n groups = self._call_procedure(\"list_system_groups\")\n out = []\n for group in groups:\n out.append(self._parse_system_group(group))\n return out\n\n def add_system_to_group(self, system_id: UUID, group_id: UUID):\n self._call_procedure(\"add_system_to_group\", system_id, group_id)\n\n def remove_system_from_group(self, system_id: UUID, group_id: UUID):\n self._call_procedure(\"remove_system_from_group\", system_id, group_id)\n\n\nclass ComputeManagementInterface(StorageInterface):\n \"\"\"A special interface to the database (that requires different permissions)\n to list all computations and allow setting a failure message on a computation.\n \"\"\"\n\n def __init__(self):\n self._cursor = None\n self.commit = True\n\n def list_system_data_status(self) -> List[models.ManagementSystemDataStatus]:\n with self.start_transaction() as st:\n res = st._call_procedure(\"list_system_data_status\", with_current_user=False)\n\n def repq(d):\n if d[\"status\"] == \"prepared\":\n d[\"status\"] = \"queued\"\n return d\n\n return [models.ManagementSystemDataStatus(**repq(r)) for r in res]\n\n def report_failure(self, system_id: str, dataset: str, message: str):\n with self.start_transaction() as st:\n st._call_procedure(\n \"report_failure\", system_id, dataset, message, with_current_user=False\n )\n","repo_name":"UARENForecasting/ESPRR","sub_path":"api/esprr_api/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":13563,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
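Because the cursor property above raises outside start_transaction, a caller has to wrap every procedure call in the context manager. A hypothetical call pattern (the user id is a placeholder):

from esprr_api.storage import StorageInterface

storage = StorageInterface(user="auth0|example-user")
with storage.start_transaction() as st:
    # st.cursor exists only inside this block; outside it, AttributeError.
    for system in st.list_systems():
        print(system)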
+{"seq_id":"32007001876","text":"import boto3\nimport re\nimport os\n\n\ndef get_s3_client():\n \"\"\"\n Get S3 Client from boto3\n :return:\n \"\"\"\n return boto3.client('s3')\n\n\ndef delete_objects_from_bucket(bucket_name: str, object_keys: list):\n client = get_s3_client()\n print(object_keys)\n client.delete_objects(Bucket=bucket_name, Delete={\"Objects\": [{\"Key\": key} for key in object_keys]})\n\n\ndef find_objects_by_tag(bucket_name: str, key_name: str, value_pattern: str):\n client = get_s3_client()\n paginator = client.get_paginator(\"list_objects_v2\")\n found_keys = []\n for result in paginator.paginate(Bucket=bucket_name):\n bucket_objects = result.get(\"Contents\", [])\n for obj in bucket_objects:\n object_tags = client.get_object_tagging(Bucket=bucket_name, Key=obj[\"Key\"])\n all_tags = object_tags.get(\"TagSet\", [])\n if len(all_tags) > 0 and any(\n tag.get(\"Key\") == key_name and re.match(value_pattern, tag.get(\"Value\")) for tag in all_tags):\n found_keys.append(obj.get(\"Key\"))\n return found_keys\n\n\ndef find_objects_by_metadata(bucket_name: str, key_name: str, value_pattern: str):\n client = get_s3_client()\n paginator = client.get_paginator(\"list_objects_v2\")\n found_keys = []\n for result in paginator.paginate(Bucket=bucket_name):\n bucket_objects = result.get(\"Contents\", [])\n for obj in bucket_objects:\n object_meta_data = client.head_object(Bucket=bucket_name, Key=obj[\"Key\"])\n all_meta_tags = object_meta_data.get(\"Metadata\", [])\n if re.match(value_pattern, all_meta_tags.get(key_name)):\n found_keys.append(obj.get(\"Key\"))\n return found_keys\n\n\ndef handler(event, context):\n \"\"\"\n Lambda Handler to delete objects from bucket using tags and meta data\n :param event:\n :param context:\n :return:\n \"\"\"\n # find objects if their tag key name is user_name and its value contains ch\n delete_keys_tags = find_objects_by_tag(bucket_name=os.environ[\"BUCKET_NAME\"], key_name=\"user_name\",\n value_pattern=\".*ch.*\")\n # find objects if their metadata email has .org at last\n delete_keys_meta = find_objects_by_metadata(bucket_name=os.environ[\"BUCKET_NAME\"], key_name=\"email\",\n value_pattern=\".*\\.org\")\n # delete the objects found above\n\n delete_objects_from_bucket(bucket_name=os.environ[\"BUCKET_NAME\"], object_keys=delete_keys_meta)\n delete_objects_from_bucket(bucket_name=os.environ[\"BUCKET_NAME\"], object_keys=delete_keys_tags)\n return \"Success\"\n\n\nif __name__ == \"__main__\":\n handler(None, None)\n","repo_name":"404shades/RohanIPATrainingAWS","sub_path":"lambda/s3_query_select_delete/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"72894649193","text":"\"\"\"Test the logger extension module.\"\"\"\n# pylint: disable=protected-access,redefined-outer-name,unused-variable,invalid-name\nimport importlib\nimport sys\nimport unittest\n\nimport sentry_sdk\nfrom flask import Flask\n\nimport flask_logger\n\nTEST_DSN = 'http://foo:bar@sentry.local/1?timeout=1'\n\n\ndef create_app():\n \"\"\"Create a Flask app for context.\"\"\"\n app = Flask(__name__)\n return app\n\n\nclass TestSentrySdkImport(unittest.TestCase):\n \"\"\"Test logger when sentrysdk isn't installed.\"\"\"\n\n def setUp(self):\n \"\"\"Set up tests.\"\"\"\n # Force flask_logger to load without sentry_sdk in the environment\n sys.modules['sentry_sdk'] = None\n importlib.reload(flask_logger.extension)\n self.app = create_app()\n self.ctx = self.app.app_context()\n self.ctx.push()\n\n def tearDown(self):\n \"\"\"Tear down tests.\"\"\"\n self.ctx.pop()\n # reset any mock loggers at module level\n # pylint: disable=invalid-name\n LOGGERS = {} # noqa\n sys.modules['sentry_sdk'] = sentry_sdk\n # Reload flask logger to restore sys.modules to correct state\n importlib.reload(flask_logger.extension)\n\n def test_log_without_sentrysdk(self):\n \"\"\"Test establishing logger when sentry_sdk isn't installed.\"\"\"\n logger = flask_logger.Logger()\n logger.init_app(self.app)\n with self.assertRaises(Exception) as context:\n logger.error('no_sentry_sdk_logger', 'this will raise an exception', dsn=TEST_DSN)\n self.assertEqual(\n str(context.exception),\n 'If specifying SENTRY_DSN, sentry_sdk must be installed '\n '(pip install flask-logger[Sentry])'\n )\n","repo_name":"bbelyeu/flask-logger","sub_path":"flask_logger/tests/test_sentrysdk_import.py","file_name":"test_sentrysdk_import.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"10787181573","text":"import configparser\nimport os\nimport csv\nfrom typing import Union\n\ndef print_message(message: str, level: int=0) -> None:\n colors = {\n 0: '\\x1b[0m', # DEFAULT (info)\n 1: '\\x1b[1;33m', # YEL (warn)\n 2: '\\x1b[1;31m', # RED (error)\n 3: '\\x1b[1;34m', # BLUE (success)\n }\n print(f\"{colors[level]}{message}{colors[0]}\")\n\ndef print_error(message: str) -> None:\n print_message(message, level=2)\n\ndef print_warning(message: str) -> None:\n print_message(message, level=1)\n\ndef print_success(message: str) -> None:\n print_message(message, level=3)\n\ndef get_config(configfile: str) -> Union[tuple, bool]:\n config = configparser.ConfigParser()\n with open(configfile) as file:\n config.read_file(file)\n if 'remote' in config:\n datauser = config.get('remote', 'datauser')\n serverip = config.get('remote', 'serverip')\n sudo = config.getboolean('remote', 'sudo')\n cachelimit = (config.getfloat('local_cache', 'limit') * 1073741824) # GB to bytes\n return (datauser, serverip, sudo, cachelimit)\n\n print_error(\"ERROR config section expected: remote\")\n return None\n\ndef read_source_dest_csv(filename: str) -> dict:\n source_to_dest = []\n with open(filename, \"r\") as csv_file:\n csv_reader = csv.reader(csv_file)\n for line in csv_reader:\n try:\n source, dest = \"/\" + line[0].strip().strip(\"/\"), \"/\" + line[1].strip().strip(\"/\")\n if source != \"/\" and dest != '/':\n source_to_dest.append((source, dest))\n else:\n print_warning(f\"WARNING: Cannot read line in csv, skipping: {line}\")\n except Exception:\n print_warning(f\"WARNING: Cannot read line in csv, skipping: {line}\")\n\n return source_to_dest\n\ndef create_dir(path: str) -> bool:\n \"\"\"\n Creates a local directory, if it does not exist.\n Returns True upon succes or existence. False otherwise.\n \"\"\"\n try:\n os.makedirs(path, exist_ok=True)\n return True\n except Exception:\n return False\n\ndef write_csv(success: list,\n failure: list,\n successpath: str,\n failurepath: str) -> None:\n with open(successpath, 'w') as out:\n csv_out = csv.writer(out)\n csv_out.writerow(['iRODS', 'local'])\n for row in success:\n csv_out.writerow(row)\n\n print_message(f\"Wrote succesful transfers to {successpath}\")\n\n with open(failurepath, 'w') as out:\n csv_out = csv.writer(out)\n csv_out.writerow(['iRODS', 'local', 'reason'])\n for row in failure:\n csv_out.writerow(row)\n\n print_message(f\"Wrote failed transfers to {failurepath}\")\n","repo_name":"UtrechtUniversity/iBridges-SteppingStone","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31169559055","text":"import json\nimport os\nimport zipfile\n\nimport requests\n\n\nclass Client:\n\n def __init__(self, host, port=None, protocol='https', token=None):\n self.host = host\n self.port = port\n self.token = token\n self.protocol = protocol\n\n @property\n def base_url(self):\n if self.port:\n return f'{self.protocol}://{self.host}:{self.port}'\n else:\n return f'{self.protocol}://{self.host}'\n\n def get_upload_token(self, api_token):\n r = requests.get(f'{self.base_url}/api/auth/token/upload', headers={'Authorization': f'Bearer {api_token}'})\n r.raise_for_status()\n self.token = r.json()['access_token']\n\n @staticmethod\n def create_zipfile(html_path, working_dir=None):\n if working_dir is None:\n working_dir = os.getcwd()\n zip_fname = os.path.join(working_dir, 'docs-upload.zip')\n zipf = zipfile.ZipFile(zip_fname, 'w', zipfile.ZIP_DEFLATED)\n for dirname, _, files in os.walk(html_path):\n for filename in files:\n filepath = os.path.join(dirname, filename)\n zipf.write(filepath, arcname=os.path.relpath(filepath, html_path))\n zipf.close()\n return zip_fname\n\n def upload_zipfile(self, zipfile, name, version, repository, tags=None, ):\n if tags is None:\n tags = list()\n values = json.dumps({'version': version,\n 'name': name,\n 'repository': repository,\n 'tags': tags})\n response = requests.post(f'{self.base_url}/api/docs/upload', data=values,\n headers={'Authorization': f'Bearer {self.token}'})\n response.raise_for_status()\n upload_url = response.content.decode().split('Location: ')[1][:-1]\n response = requests.put(upload_url, files={'documentation': ('docs-upload.zip', open(zipfile, 'rb').read())},\n headers={'Authorization': f'Bearer {self.token}'})\n response.raise_for_status()\n return response\n\n def upload_dir(self, html_path, name, version, repository, tags=None, working_dir=None):\n self.upload_zipfile(self.create_zipfile(html_path, working_dir), name, version, repository, tags)\n","repo_name":"djpugh/docserver","sub_path":"src/docserver/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"74070097193","text":"import pandas as pd\nimport json\nimport re\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nimport numpy as np\nimport warnings\nfrom sklearn.model_selection import train_test_split\nimport jsonlines\nfrom sklearn.naive_bayes import MultinomialNB\n\nwarnings.filterwarnings('ignore')\n\nlist_artists = ['Queen', 'Muse', 'Janelle Monáe', 'Hot Chip',\n 'LCD Soundsystem', 'The Postal Service',\n 'Daft Punk', 'The Strokes']\n\ndf = pd.read_json('lyrics.jl', lines=True)\ndf_fix = df[df.titles != '']\nfor artist in list_artists:\n df_fix.loc[df_fix.artists.str.contains(artist), 'main_artist']=artist\n\ndf_fix = df_fix.dropna()\n\ndf_fix = df_fix.drop_duplicates(subset = 'lyrics')\ndf_fix.titles.value_counts().head(20)\ndf_fix[df_fix['titles']=='Invincible']\ndf_nodupe = df_fix.drop_duplicates(subset = ['titles', 'main_artist'])\ndf_nodupe.loc[:,'lyrics'] = df_nodupe['lyrics'].str.replace('\\r\\n', ' ') #could probably do this better with regex\ndf_nodupe.loc[:,'lyrics'] = df_nodupe['lyrics'].str.replace('\\n', ' ')\ndf_nodupe.drop_duplicates(subset = 'lyrics')\ndf_nodupe.dropna(inplace=True)\ndf_nodupe['first_6'] = df_nodupe.titles.str[0:8]\ndf_nodupe.first_6 = df_nodupe.first_6.str.lower()\ndf_nodupe_title = df_nodupe.drop_duplicates(subset = ['first_6', 'main_artist'])\ndf_nodupe_title[df_nodupe_title.first_6.str.contains('how')]\n\nX = df_nodupe_title.lyrics\ny = df_nodupe_title.main_artist\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\ncv = CountVectorizer(lowercase=True, stop_words='english', token_pattern='[a-z]+')\ncv.fit(X_train)\nX_cv = cv.transform(X_train)\nX_test_cv = cv.transform(X_test)\n\nX_cv.todense()\n\nnb = MultinomialNB()\n\nnb.fit(X_cv, y_train)\ny_pred = nb.predict(X_test_cv)\nnb.fit(X_tf, y_train)\ny_pred_tfid = nb.predict(X_test_tf)\n\npredictions_cv = nb.predict_proba(X_test_cv)\nimport seaborn as sns\nsns.heatmap(predictions_cv)\n\ndef bayes_eval(y_true, y_pred, listofartists):\n cv_confusionmatrix = confusion_matrix(y_true, y_pred, labels = listofartists ,normalize = 'true' )\n ax = sns.heatmap(cv_confusionmatrix, annot = True, xticklabels = listofartists, yticklabels = listofartists)\n ax.set(xlabel='Predicted', ylabel='True')\n print(classification_report(y_true, y_pred))\n\nimport spacy\nmodel = spacy.load('en_core_web_md')\nX_tokens = [model(song) for song in X]\n\nlemmatized_word = []\nlemmatized_song = ''\nX_lemmatized = []\nfor song in X_tokens:\n lemmatized_word = []\n for word in song:\n lemmatized_word.append(word.lemma_)\n lemmatized_song = ' '.join(lemmatized_word)\n X_lemmatized.append(lemmatized_song)\nX_lemmatized[5]\n\nX_train_lem, X_test_lem, y_train, y_test = train_test_split(X_lemmatized, y, test_size=0.33, random_state=42)\ncv_lem = CountVectorizer(stop_words='english', lowercase = False, tokenizer = None, token_pattern='[a-z]+')\ncv_lem.fit(X_train_lem)\nX_train_lemcv = cv_lem.transform(X_train_lem)\nX_test_lemcv = cv_lem.transform(X_test_lem)\ncv_lem.get_feature_names()==cv.get_feature_names()\n\n\n# In[56]:\n\n\nnb.fit(X_train_lemcv, y_train)\ny_pred_lemcv = nb.predict(X_test_lemcv)\nbayes_eval(y_test, y_pred_lemcv, list_artists) #slightly different\n\n\n# In[57]:\n\n\ncv_lem = CountVectorizer(stop_words='english', lowercase = False, tokenizer = None, token_pattern='[a-z]+', min_df = 3)\ncv_lem.fit(X_train_lem)\nX_train_lemcv = 
cv_lem.transform(X_train_lem)\nX_test_lemcv = cv_lem.transform(X_test_lem)\ncv_lem.get_feature_names()==cv.get_feature_names()\n\nnb.fit(X_train_lemcv, y_train)\ny_pred_lemcv = nb.predict(X_test_lemcv)\nbayes_eval(y_test, y_pred_lemcv, list_artists) #changing min_df to 3 really improved things\n\n\n# In[58]:\n\n\nbayes_eval(y_test, y_pred_lemcv, list_artists) #changing min_df to 3\n\n\n# In[59]:\n\n\nvectorizer = TfidfVectorizer(lowercase=True, stop_words='english', token_pattern='[a-z]+', min_df= 5)\nX_train_lemtf =vectorizer.fit_transform(X_train_lem)\nX_test_lemtf = vectorizer.transform(X_test_lem)\n\n\n# In[60]:\n\n\nnb.fit(X_train_lemtf, y_train)\ny_pred_lemtf = nb.predict(X_test_lemtf)\nbayes_eval(y_test, y_pred_lemtf, list_artists) #still really really bad\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# # oversampling and undersampling\n\n# In[61]:\n\n\n#undersample first\nfrom imblearn.under_sampling import RandomUnderSampler, NearMiss\n\n\n# In[62]:\n\n\nlist_artists\n\n\n# In[63]:\n\n\ndf_nodupe_title.groupby('main_artist').count()\n\n\n# In[95]:\n\n\nsamp_dict = {'Queen':50, 'Muse':50, 'Janelle Monáe':30, 'Hot Chip':30,\n 'LCD Soundsystem':30, 'The Postal Service':6, 'Daft Punk':30, 'The Strokes':38 }\nrus = RandomUnderSampler(random_state=10, sampling_strategy=samp_dict)\nnm = NearMiss(sampling_strategy=samp_dict)\n\n\n# In[96]:\n\n\nX_rus, y_rus = rus.fit_resample(X_train_lemcv, y_train)\nX_nm, y_nm = nm.fit_resample(X_train_lemcv, y_train)\n\n\n# In[97]:\n\n\nX_rus.shape, y_rus.shape, np.unique(y_rus, return_counts=True)\n\n\n# In[67]:\n\n\nX_nm.shape, y_nm.shape, np.unique(y_nm, return_counts=True) #decreased overall amount of songs by about half\n\n\n# In[68]:\n\n\nX_train_lemcv.shape\n\n\n# In[ ]:\n\n\nnb.fit(X_rus, y_rus)\ny_pred_rus = nb.predict(X_test_lemcv)\n\n\n# In[69]:\n\n\nbayes_eval(y_test, y_pred_rus, list_artists) #rus destroyed accuracy - and queen can't be guessed as queen\n\n\n# In[ ]:\n\n\nnb.fit(X_nm, y_nm)\ny_pred_nm = nb.predict(X_test_lemcv)\n\n\n# In[70]:\n\n\nbayes_eval(y_test, y_pred_nm, list_artists) # near miss undersample\n\n\n# In[71]:\n\n\n# try oversampling instead\n\n\n# In[72]:\n\n\nfrom imblearn.over_sampling import RandomOverSampler, SMOTE\nupsample_dict = {'Queen':144, 'Muse':75, 'Janelle Monáe':50, 'Hot Chip':64, 'LCD Soundsystem':50,\n 'The Postal Service':20, 'Daft Punk':50, 'The Strokes':50 }\n\nros = RandomOverSampler(random_state=10)\n\n\n# In[73]:\n\n\nX_ros, y_ros = ros.fit_resample(X_train_lemcv, y_train)\n\n\n# In[74]:\n\n\nnp.unique(y_train, return_counts=True)\n\n\n# In[75]:\n\n\nnp.unique(y_ros, return_counts=True)\n\n\n# In[ ]:\n\n\nnb.fit(X_ros, y_ros)\ny_pred_ros = nb.predict(X_test_lemcv)\n\n\n# In[76]:\n\n\nbayes_eval(y_test, y_pred_ros, list_artists) #ros upsample to 144 for each\n\n\n# In[77]:\n\n\nfrom imblearn.over_sampling import SMOTE\n\n\n# In[78]:\n\n\nsm = SMOTE(random_state=42)\n\n\n# In[79]:\n\n\nX_sm, y_sm = sm.fit_resample(X_train_lemcv, y_train)\n\n\n# In[80]:\n\n\nnp.unique(y_sm, return_counts=True)\n\n\n# In[ ]:\n\n\nnb.fit(X_sm, y_sm)\ny_pred_sm = nb.predict(X_test_lemcv)\n\n\n# In[81]:\n\n\n#SMOTE upsample to 144 for each\nbayes_eval(y_test, y_pred_sm, list_artists) #works better for me than random\n\n\n# In[82]:\n\n\n#combine upsample and down sample\n\n\n# In[83]:\n\n\nfrom imblearn.combine import SMOTEENN\n\n\n# In[84]:\n\n\nsamp_dict\n\n\n# In[85]:\n\n\nsme = SMOTEENN(random_state=42)\n\n\n# In[86]:\n\n\nX_sme, y_sme = sme.fit_resample(X_train_lemcv, y_train)\n\n\n# In[87]:\n\n\nnp.unique(y_sme, 
return_counts=True)\n\n\n# In[ ]:\n\n\nnb.fit(X_sme, y_sme)\ny_pred_sme = nb.predict(X_test_lemcv)\n\n\n# In[88]:\n\n\nbayes_eval(y_test, y_pred_sme, list_artists) #that really killed accuracy, why did it bring the queen songs down to 2?\n\n\n# In[89]:\n\n\nfrom imblearn.combine import SMOTETomek\nupsample_dict\n\n\n# In[126]:\n\n\nsmt_dict = {'Queen': 144,'Muse': 100,'Janelle Monáe': 60, 'Hot Chip': 64, 'LCD Soundsystem': 60,'The Postal Service': 30,'Daft Punk': 60,'The Strokes': 60}\nsmt = SMOTETomek(random_state=42 )\nsampling_strategy=smt_dict\nX_smt, y_smt = smt.fit_resample(X_train_lemcv, y_train)\nnb.fit(X_smt, y_smt)\ny_pred_smt = nb.predict(X_test_lemcv)\n\n\n# In[128]:\n\n\nbayes_eval(y_test, y_pred_smt, list_artists) #SMOTETomek looks the same as SMOTE - 144 songs for each\n\n\n# In[ ]:\n\n\n\n\n\n# In[91]:\n\n\n# if time, write function to optimize the upsampling\n\n\n# In[92]:\n\n\n# make word clouds for fun\n\n\n# In[93]:\n\n\nfrom matplotlib import pyplot as plt\nimport wordcloud\n\n#mask = np.____((500, ____, 3), _____)\n#mask[150:350,____:350,:] = 255 # masked out area\ndef make_wordclouds(all_songs_list):\n fig, axs = plt.subplots(nrows=4, ncols=2,figsize=(10, 10))\n axs = axs.flatten()\n list_artists = ['Queen', 'Muse', 'Janelle Monáe', 'Hot Chip', 'LCD Soundsystem','The Postal Service', 'Daft Punk', 'The Strokes']\n# axes_list =[ ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8]\n for i, artist_list in enumerate(allsongs_list):\n cloud = wordcloud.WordCloud(background_color=\"white\",\n max_words=50,\n collocations=True, # calculates frequencies\n contour_color='steelblue').generate(''.join(artist_list))\n # stop words are removed!\n axs[i].imshow(cloud, interpolation='bilinear')\n axs[i].axis('off')\n name = str(artist_list)\n axs[i].set_title(str(list_artists[i]))\n\nplt.show()\n\n\n# In[122]:\n\n\nmake_wordclouds(allsongs_list)\n\n\n# In[ ]:\n\n\n\n\n\n# In[99]:\n\n\n#trying a random forest classifier for fun\nfrom sklearn.ensemble import RandomForestClassifier\nrfc = RandomForestClassifier()\n\n\n# In[100]:\n\n\nrfc.fit(X_train_lemcv, y_train)\ny_pred_rfc = rfc.predict(X_test_lemcv)\n\n\n# In[101]:\n\n\nbayes_eval(y_test, y_pred_rfc, list_artists) #randomforest did a little worse than the bayes\n\n\n# In[102]:\n\n\nsamp_dict\n\n\n# In[103]:\n\n\nupsample_dict\n\n\n# In[104]:\n\n\ndownsam_dict = {'Queen': 100,\n 'Muse': 75,\n 'Janelle Monáe': 50,\n 'Hot Chip': 64,\n 'LCD Soundsystem': 50,\n 'The Postal Service': 20,\n 'Daft Punk': 40,\n 'The Strokes': 50}\n\n\n# In[105]:\n\n\nnp.unique(y_train, return_counts=True)\n\n\n# In[106]:\n\n\nfrom imblearn.under_sampling import EditedNearestNeighbours\nfrom imblearn.pipeline import make_pipeline\nupsmote= SMOTE(random_state=42, sampling_strategy= upsample_dict)\nenn = EditedNearestNeighbours() # this works poorly in the pipeline\nrus = RandomUnderSampler(random_state=42, sampling_strategy=downsam_dict)\n\n\n# In[107]:\n\n\nup_down_pipeline = make_pipeline(upsmote, rus, nb)\n\n\n# In[108]:\n\n\nup_down_pipeline.fit(X_train_lemcv, y_train)\n\n\n# In[109]:\n\n\ny_pred_pipeline = up_down_pipeline.predict(X_test_lemcv)\n\n\n# In[110]:\n\n\nbayes_eval(y_test, y_pred_pipeline, list_artists) # Is this better? 
Unclear\n\n\n# In[111]:\n\n\n#try pipeline with tfidf vectorized data\nup_down_pipeline.fit(X_train_lemtf, y_train)\n\n\n# In[112]:\n\n\ny_pred_pipeline_tf = up_down_pipeline.predict(X_test_lemtf)\n\n\n# In[113]:\n\n\nbayes_eval(y_test, y_pred_pipeline, list_artists) #with resampling, looks essentially the same was with cv\n\n\n# In[124]:\n\n\naccuracy_summary = {'Strategy':['CV', 'Tfidf', 'CV+lemma', 'CV+lemma+min_df','Tfidf+lemma', 'Tfidf+lemma+min_df',\n 'CV+lemma+Rus',\n 'CV+lemma+NearMiss', 'CV+lemma+Ros', 'CV+lemma+SMOTE', 'CV+lemma+smoteteen',\n 'CV+lemma+Smotetomek','Random Forest - CV+lemma', 'CV+lemma+pipeline',\n 'Tfidf+lemma+pipeline'], 'Accuracy':[0.47, 0.28, 0.55, 0.55, 0.28,\n 0.29, 0.44, 0.42, 0.49,0.56,\n 0.35, 0.53, 0.45, 0.51, 0.51]}\ndf_summary = pd.DataFrame(accuracy_summary, columns = ['Strategy', 'Accuracy'])\n\n\n# In[125]:\n\n\ndf_summary.sort_values('Accuracy', ascending = False)\n\n\n# In[123]:\n\n\nmake_wordclouds(allsongs_list)\n\n\n# ### What I've learned from doing this:\n# - Lemmatization makes a big difference\n# - Several issues with really small sample sizes\n# - in training\n# - in calculating accuracy\n# - Pipelines/resampling/etc.\n# ### Further questions:\n# - How to more effectively use Spacy\n# - How can we use Spacy to effectively look at document similarity\n# - Upsampling before tfidf?\n# ### Things to add to this project:\n# - Combine into single py file with the best looking model, allow user input of new song for test (in progress)\n# - Iteratively remove artists to see what the best combination is\n# - Iterirate through different values for the sampling strategies to optimize pipeline\n# - Add features - usage of parts of speech, sentiment analysis\n# - Mask word clouds onto some kind of symbol/art for the artist\n# - Add Spotify playlist\n\n# In[116]:\n\n\nget_ipython().system('dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.Play')\n\n\n\n# In[117]:\n\n\n#try the spacy with spacys own vectorization\ntype(X_tokens[1])\n\n\n# In[118]:\n\n\n#turn each word into a vector\ndef vector(tokens):\n\n song_vectors = []\n for item in tokens:\n word_vectors=[]\n for word in item:\n word_vectors.append(model.vocab[word].vector)\n song_vectors.append(word_vectors)\n return song_vectors\n\n\n# In[119]:\n\n\n#totally confused\nspacy_vectors = vector(X_tokens)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[120]:\n\n\nfrom spacy.lemmatizer import Lemmatizer\nfrom spacy.lookups import Lookups\n\nlookups = Lookups()\nlemmatizer = Lemmatizer(lookups)\nX_lemmas = lemmatizer(X)\nprint(X_lemmas)\n\n#this is a dead end - lemmatizer is only for single words maybe?\n\n\n# In[121]:\n\n\n# make lists for each artist\n# maybe this was unecessary\n\nqueen_list = df_nodupe_title[df_nodupe_title['main_artist']=='Queen'].lyrics.to_list()\nmuse_list = df_nodupe_title[df_nodupe_title['main_artist']=='Muse'].lyrics.to_list()\njanelle_list = df_nodupe_title[df_nodupe_title['main_artist']=='Janelle Monáe'].lyrics.to_list()\nhotchip_list = df_nodupe_title[df_nodupe_title['main_artist']=='Hot Chip'].lyrics.to_list()\nlcd_list = df_nodupe_title[df_nodupe_title['main_artist']=='LCD Soundsystem'].lyrics.to_list()\npostalservice_list = df_nodupe_title[df_nodupe_title['main_artist']=='The Postal Service'].lyrics.to_list()\ndaftpunk_list = df_nodupe_title[df_nodupe_title['main_artist']=='Daft Punk'].lyrics.to_list()\nstrokes_list = 
df_nodupe_title[df_nodupe_title['main_artist']=='The Strokes'].lyrics.to_list()\n\nallsongs_list = [queen_list, muse_list, janelle_list, hotchip_list, lcd_list, postalservice_list, daftpunk_list,strokes_list]\n\n#there has got to be a better way to do this, but can't change the list name in a for loop\n\n#for artist in list_artists:\n #lyrics_dict = df_nodupe_title[df_nodupe_title['main_artist']==str(artist)].lyrics.to_dict()\n\n#df_nodupe_title.index = df_nodupe_title.main_artist\n#lyrics_dict = df_nodupe_title.to_dict('index')# dictionary is overwriting\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n","repo_name":"thedinak/lyrics_analysis","sub_path":"lyrics_classifier.py","file_name":"lyrics_classifier.py","file_ext":"py","file_size_in_byte":14199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
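The notebook above assembles its vectorize, resample, and classify steps by hand, refitting `nb` in place after each experiment. A minimal sketch of the same pattern wrapped in an imbalanced-learn pipeline, so that SMOTE only ever sees training folds; the vectorizer settings mirror the record, but the pipeline itself is an illustration, not the notebook's actual final model:

```python
# Sketch only: assumes scikit-learn and imbalanced-learn are installed and that
# X_train_lem / y_train are the lemmatized lyrics and artist labels from above.
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import make_pipeline  # applies samplers during fit only
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

def build_lyrics_pipeline():
    return make_pipeline(
        CountVectorizer(stop_words='english', token_pattern='[a-z]+', min_df=3),
        SMOTE(random_state=42),      # resamples the vectorized training matrix
        MultinomialNB(),
    )

# pipe = build_lyrics_pipeline()
# pipe.fit(X_train_lem, y_train)     # raw lyric strings in, artist labels out
# y_pred = pipe.predict(X_test_lem)
```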
+{"seq_id":"9702366538","text":"# -*- encoding: utf8 -*-\n# created by Toons on 01/05/2017\nimport sys, binascii\nimport json, requests\n\n__PY3__ = True if sys.version_info[0] >= 3 else False\nif __PY3__:\n\tfrom io import BytesIO as StringIO\n\tlong = int\nelse:\n\tfrom StringIO import StringIO\n\n\n# GET generic method for ARK API\ndef get(api, dic={}, **kw):\n\treturnkey = kw.pop(\"returnKey\", False)\n\tdata = json.loads(requests.get(__URL_BASE__+api, params=dict(dic, **kw)).text)\n\tif data[\"success\"] and returnkey: return ArkyDict(data[returnkey])\n\telse: return ArkyDict(data)\n\n\nclass ArkyDict(dict):\n\t\"\"\"\nPython dict with javascript behaviour.\n>>> ad = ArkyDict()\n>>> ad[\"key1\"] = \"value1\"\n>>> ad.key2 = \"value2\"\n>>> ad\n{'key2': 'value2', 'key1': 'value1'}\n\"\"\"\n\t__setattr__ = lambda obj,*a,**k: dict.__setitem__(obj, *a, **k)\n\t__getattr__ = lambda obj,*a,**k: dict.__getitem__(obj, *a, **k)\n\t__delattr__ = lambda obj,*a,**k: dict.__delitem__(obj, *a, **k)\n\n\ndef swich(net=False):\n\t\"\"\"\nSwich between mainnet and testnet\n>>> swich(True) # use mainnet\n>>> swich(False) # use testnet\n\"\"\"\n\tglobal __NETWORK__, __URL_BASE__, __HEADERS__\n\n\t__NETWORK__ = ArkyDict()\n\t__HEADERS__ = ArkyDict()\n\n\tif net:\n\t\t# values are not all correct\n\t\t__URL_BASE__ = \"http://node1.arknet.cloud:4000\"\n\t\t__NETWORK__.update(\n\t\t\tmessagePrefix = b\"\\x18Ark Signed Message:\\n\",\n\t\t\tbip32 = ArkyDict(public=0x043587cf, private=0x04358394),\n\t\t\tpubKeyHash = b\"\\x6f\",\n\t\t\twif = b\"\\xef\",\n\t\t)\n\t\t__HEADERS__.update({\n\t\t\t'Content-Type': 'application/json; charset=utf-8',\n\t\t\t'os': 'arkwalletapp',\n\t\t\t'version': '0.5.0',\n\t\t\t'port': '1',\n\t\t\t'nethash': \"ed14889723f24ecc54871d058d98ce91ff2f973192075c0155ba2b7b70ad2511\"\n\t\t})\n\n\telse:\n\t\t__URL_BASE__ = \"http://node1.arknet.cloud:4000\"\n\t\t__NETWORK__.update(\n\t\t\tmessagePrefix = b\"\\x18Testnet Ark Signed Message:\\n\",\n\t\t\tbip32 = ArkyDict(public=0x0488b21e, private=0x0488ade4),\n\t\t\tpubKeyHash = b\"\\x17\",\n\t\t\twif = b\"\\xaa\",\n\t\t)\n\t\t__HEADERS__.update({\n\t\t\t'Content-Type': 'application/json; charset=utf-8',\n\t\t\t'os': 'arkwalletapp',\n\t\t\t'version': '0.5.0',\n\t\t\t'port': '1',\n\t\t\t'nethash': \"8b2e548078a2b0d6a382e4d75ea9205e7afc1857d31bf15cc035e8664c5dd038\"\n\t\t})\n\nswich(False)\n\n\n# ARK fees according to transactions in SATOSHI\n__FEES__ = ArkyDict({\n\t\"send\": 10000000,\n\t\"vote\": 100000000,\n\t\"delegate\": 2500000000,\n\t\"secondsignature\": 500000000,\n\t\"multisignature\": 500000000,\n\t\"dapp\": 2500000000\n})\n","repo_name":"ravelou/arky","sub_path":"arky/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"45072385586","text":"from flask import Flask, render_template, request\nimport pickle\nfrom sklearn.preprocessing import MinMaxScaler\n\napp = Flask(__name__)\n\nwith open('gb_model.pkl', 'rb') as model_file:\n loaded_gb_model = pickle.load(model_file)\n\n# Routes\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n\n user_id = request.form['user_id']\n age = float(request.form['age'])\n gender = 1 if request.form['gender'] == 'Male' else 0\n salary = float(request.form['salary'])\n \n prediction = loaded_gb_model.predict([[age, gender, salary]])\n\n if prediction[0] == 1:\n result_message = f\"Customer with ID {user_id} is likely to purchase a car.\"\n else:\n result_message = f\"Customer with ID {user_id} is unlikely to purchase a car.\"\n \n return render_template('result.html', prediction=prediction[0], message=result_message)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"smartinternz02/SI-GuidedProject-611318-1698393972","sub_path":"Project Development Phase/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"24913463773","text":"from custom_types import PixelToLight\nfrom utils import get_strip_and_index, group_pixels_to_light_by_pin, assemble_strip_config\n\n\ndef test_get_strip_and_index_none():\n \"\"\"\n too far away to light anything\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": False,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n dist = 3\n\n strip, index = get_strip_and_index(strips, dist)\n assert strip is None\n assert index is None\n\n\ndef test_get_strip_and_index_strip_1():\n \"\"\"\n light strip further away\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": False,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n dist = 2\n\n strip, index = get_strip_and_index(strips, dist)\n assert strip[\"offset\"] == strip_1[\"offset\"]\n assert index is not None\n\n\ndef test_get_strip_and_index_strip_1_reverse():\n \"\"\"\n light strip further away\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": True,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n dist = 2\n\n strip, index = get_strip_and_index(strips, dist)\n assert strip[\"offset\"] == strip_1[\"offset\"]\n expected_index = (\n strip_1[\"length\"] * strip_1[\"leds_per_m\"]\n - (strip_1[\"offset\"] - dist) * strip_1[\"leds_per_m\"]\n )\n assert index == round(expected_index)\n\n\ndef test_get_strip_and_index_strip_2():\n \"\"\"\n light strip closer by\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": False,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n dist = 0.1\n\n strip, index = get_strip_and_index(strips, dist)\n assert strip[\"offset\"] == strip_2[\"offset\"]\n assert index is not None\n\n\ndef test_get_strip_and_index_gap():\n \"\"\"\n don't light anything in strip gap\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": False,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n dist = 1.1\n\n strip, index = get_strip_and_index(strips, dist)\n assert strip is None\n assert index is None\n\n\ndef test_group_pixels_by_pin():\n \"\"\"\n group pixels to be lit correctly by pin number\n \"\"\"\n pixel_1: PixelToLight = {\"pin\": 1, \"pixel_index\": 3, \"color\": \"#f00000\"}\n pixel_2: PixelToLight = {\"pin\": 2, \"pixel_index\": 3, \"color\": \"#ffffff\"}\n pixel_3: PixelToLight = {\"pin\": 2, \"pixel_index\": 4, \"color\": \"#000000\"}\n\n grouped = group_pixels_to_light_by_pin([pixel_1, pixel_2, pixel_3])\n\n assert 1 in grouped.keys()\n assert 2 in grouped.keys()\n assert len(grouped[2]) == 2\n\n\ndef test_assemble_strip_config():\n \"\"\"\n assemble strip config correctly\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": False,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"ledsPerM\": 60,\n \"gpioPin\": 
13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n strip_config = assemble_strip_config(strips)\n\n assert len(strip_config) == 2\n assert strip_config[0][\"gpio_pin\"] == strip_1[\"gpio_pin\"]\n assert strip_config[0][\"leds_per_m\"] == strip_1[\"leds_per_m\"]\n assert strip_config[1][\"gpio_pin\"] == strip_2[\"gpioPin\"]\n assert strip_config[1][\"leds_per_m\"] == strip_2[\"ledsPerM\"]","repo_name":"creimers/dont-led-me-down","sub_path":"test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
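The tests above pin down `get_strip_and_index` fully: a strip covers the distance interval `(offset - length, offset]`, and `reverse` flips the LED index. One possible implementation consistent with those tests (a sketch, not the project's actual `utils.py`):

```python
# Derived solely from the expectations encoded in the tests above.
def get_strip_and_index(strips, dist):
    for strip in strips:
        near_edge = strip["offset"] - strip["length"]
        if near_edge < dist <= strip["offset"]:
            index = (strip["offset"] - dist) * strip["leds_per_m"]
            if strip["reverse"]:
                index = strip["length"] * strip["leds_per_m"] - index
            return strip, round(index)
    return None, None  # dist falls in a gap or beyond every strip
```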
+{"seq_id":"8244555974","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\"\"\"\nclass Solution:\n def postorder(self, root: 'Node') -> List[int]:\n # first a recursive solution\n if not root: return []\n if not root.children: return [root.val]\n \n ls = []\n for i in root.children:\n ls = ls + self.postorder(i)\n ls = ls + [root.val]\n return ls\n \n # and now an iterative solution\n # although for the record iteration and trees go together... poorly.\n \n # tbh right now i'm too tired to finish this but there's a decent chance i'll finish it tomorrow\n # honestly i'm exhausted, i already solved the problem the intuitive way,\n # and i'm gonna store it on github before i go to bed so i don't lose it\n # but for the record it's just an iterative dfs (which is a shitty way to code a dfs)\n # and we just shove stuff in a stack. idk i'm so tired you guys.\n \n '''if not root: return []\n if not root.children: return [root.val]\n \n ls = [] # an actual list of the elts\n stack = [] # the stack we're drawing from'''\n","repo_name":"annasw/LeetCode","sub_path":"586-N-ary-Tree-Postorder-Traversal.py","file_name":"586-N-ary-Tree-Postorder-Traversal.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"11169635225","text":"import InputReader\n\n\ndef main():\n keys = InputReader.readInputFileNumbers(25)\n subjectNumber = 7\n\n encryptionKey = getKey(keys[1], getLoopSize(keys[0], subjectNumber))\n print(f\"The encryption key is {encryptionKey}.\")\n\n\ndef getKey(subjectNumber, loopSize):\n \"\"\"Generates the encryption key from the subject number and the loop size.\n \n The encryption key starts as 1.\n Then the following actions get executed loop size times:\n \n * multiply the previous value by the subject number\n * set the encryption key to the remainder from dividing the previous\n result by 20201227\n \n Parameters\n ----------\n subjectNumber: int\n The subject number to use in the calculation.\n loopSize: int\n How many times to execute the calculation.\n \n Returns\n -------\n int\n The resulting encryption key.\n \"\"\"\n value = 1\n for _ in range(loopSize):\n value = value * subjectNumber % 20201227\n return value\n\n\ndef getLoopSize(key, subjectNumber):\n \"\"\"Reverse engineers the loop size from the given key and subject number.\n \n This is done by continually dividing the key by the subject number\n until the result matches 1.\n If the result has decimal digits 20201227 gets added to the previous\n key, before it is divided again.\n By counting the divisions the loop size can be determined.\n \n Parameters\n ----------\n key: int\n The key to get the loop size for.\n subjectNumber: int\n The subject number used to generate the key.\n \n Returns\n -------\n int\n The loop size used to generate the given key.\n \"\"\"\n loopSize = 0\n while key != 1:\n newKey = key / subjectNumber\n while newKey % 1 != 0:\n key += 20201227\n newKey = key / subjectNumber\n\n key = newKey\n loopSize += 1\n\n return loopSize\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ToMe25/AdventOfCode","sub_path":"2020/Python/src/Day25.py","file_name":"Day25.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"4786913438","text":"\"\"\"\nFibonacci\n\nCreated on Mon Feb 28, 2022\n\n@author: Th3-Al7ha10\n\n\n\"\"\"\npos = input('Please enter the position of the number in Fibonacci sequence \\n')\n\ndef fibonacci (k):\n\n if k==1:\n return 0\n else:\n return fibonacci(k-1) + k-1\n \nfor i in range (1,pos+1):\n print('{}e position: {}'.format(i,fibonacci(i)))\n","repo_name":"Th3-Al7ha10/Python-Projects-Level-1","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"28343708803","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 9 09:38:22 2017\n\n@author: neal\n\"\"\"\nimport csv\n\nclass LoadSensorTsvFile:\n \"\"\"\n \"\"\"\n def __init__(self):\n self.files_data = []\n self.feature = []\n \n pass\n \n def read_in_file(self, file): \n csv_reader = csv.reader(open(file))\n file_data = [[0]*51]*7\n row_num = 0\n for row in csv_reader:\n file_data[row_num] = row[0].split('\\t')\n del file_data[row_num][-1]\n for i in range(len(file_data[row_num])):\n file_data[row_num][i] = float(file_data[row_num][i])\n row_num += 1\n# print file_data\n self.files_data.append(file_data)\n\n \n def read_in_files(self, file_path=\"\"):\n \"\"\"\n file_path: Folder containing data files\n \"\"\"\n files = []\n for i in range(8):\n files.append(file_path + \"/direction_\" + str(i+1) + \".tsv\")\n for file in files:\n self.read_in_file(file)\n \n def get_feature(self, feature_func, slice_range=[23, 28]):\n \"\"\"\n \"\"\"\n for file_data in self.files_data:\n feature_func(file_data, slice_range)\n# print self.feature\n \n def slice_avg(self, file_data, slice_range):\n \"\"\"Calculate the average value of a slice of the data\n \"\"\"\n row_num = 0\n feature_data = [0] * 7\n for row in file_data:\n feature_data[row_num] = sum(row[slice_range[0]:slice_range[1]]) / (slice_range[1] - slice_range[0])\n row_num += 1\n self.feature.append(feature_data)\n \n#tsv_file = LoadSensorTsvFile()\n#tsv_file.read_in_files('050801_yangguang')\n#tsv_file.get_feature(tsv_file.slice_avg)\n","repo_name":"nealyang2017/yang20170715","sub_path":"LoadSensorTsvFile.py","file_name":"LoadSensorTsvFile.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"17566990529","text":"def even_odd(*args):\n command = args[-1]\n nums = []\n if command=='even':\n for i in args[:-1]:\n if i % 2 == 0:\n nums.append(i)\n return nums\n elif command == 'odd':\n for i in args[:-1]:\n if i % 2 != 0:\n nums.append(i)\n return nums\n\n\nprint(even_odd(1, 2, 3, 4, 5, 6, \"even\"))\nprint(even_odd(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, \"odd\"))","repo_name":"ilias511/Advanced","sub_path":"Functions_Advanced/Even or Odd.py","file_name":"Even or Odd.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"19825449823","text":"import os\n\nfrom dvc.api import DVCFileSystem\n\n\ndef main(config):\n if os.path.isdir(f\"./data/{config.dataset}\"):\n print(f\"The folder \\\"{config.dataset}\\\" exists\")\n else:\n print(\"Dataset download begins\")\n url = \"https://github.com/Natalka-Pro/myops_tools.git\"\n fs = DVCFileSystem(url, rev=\"main\")\n fs.get(\"./data\", \"./\", recursive=True)\n print(\"Dataset download completed\")\n","repo_name":"Natalka-Pro/MYopsTools","sub_path":"myops_tools/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"11770180415","text":"\"\"\"\n*****************************************************************************\n Reads an excel file, extracts the information and store them in\n an organized list which contains a tuple of the information in each\n row in our excel file\n*****************************************************************************\n\"\"\"\nimport openpyxl\n\n\nclass ExcelReader:\n \"\"\"Creates a constructor of our class\"\"\"\n def __init__(self, excel_doc):\n self._excel_doc = excel_doc\n self._sheet = None\n\n # Gets the required sheet to extract our data\n def required_sheet(self, active_sheet):\n excel_doc = openpyxl.load_workbook(self._excel_doc)\n r_sheet = excel_doc[active_sheet]\n self._sheet = r_sheet\n\n # Extracts the data store them in a list\n def extract_data_to_list(self):\n # extracts the raw data and keep in a list\n list_data = list()\n for row in self._sheet.iter_rows():\n prime_list = list()\n for cell in row:\n prime_list.append(cell.value)\n list_data.append(prime_list)\n return list_data\n\n # contains the well formatted data in the form a tuple (name, email_address)\n def email_info(self):\n email_info_list = list()\n data = self.extract_data_to_list()\n for i in range(1, len(data)):\n info = [data[i][0], data[i][1]]\n email_info_list.append(tuple(info))\n return email_info_list\n","repo_name":"mofirojean/Emailer_using_python","sub_path":"emailer_excel_reader.py","file_name":"emailer_excel_reader.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"32185486180","text":"import torch\nimport torchvision\n\ndef save_some_examples(gen, val_loader, epoch, device):\n x, y = next(iter(val_loader))\n x, y = x.to(device).squeeze(1), y.to(device).squeeze(1)\n gen.eval()\n with torch.no_grad():\n _, y_fake = gen(x)\n y_fake = y_fake * 0.5 + 0.5 # remove normalization\n torchvision.utils.save_image(y_fake, f\"/y_gen_{epoch}.png\")\n torchvision.utils.save_image(x * 0.5 + 0.5, f\"/input_{epoch}.png\")\n if epoch == 1:\n torchvision.utils.save_image(y, f\"/label_{epoch}.jpg\")\n gen.train()\n\ndef save_checkpoint(model, optimizer, filename=\"my_ckpt.pth\"):\n print(\"=> saving checkpoint...\")\n checkpoint = {\n \"state_dict\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n }\n torch.save(checkpoint, filename)\n\ndef load_checkpoint(checkpoint_file, model, optimizer, lr, device):\n print(\"=> loading checkpoint...\")\n checkpoint = torch.load(checkpoint_file, map_location=device)\n model.load_state_dict(checkpoint[\"state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n","repo_name":"yotamraz/LootedArt","sub_path":"pix2pix/utils/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"19138006949","text":"#from models.lenet5 import Lenet\nimport torch\nimport numpy as np\nfrom itertools import chain, combinations\nimport os\nfrom sklearn.linear_model import LinearRegression\nfrom collections import OrderedDict\nfrom operator import itemgetter\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndef oracle(dic, size, nodenum, adding):\n\n #smaller values means this set (which is zeroed out) bring highest loss\n keys = list(dic.keys())\n if adding:\n mm = [i for i in keys if len(i) == nodenum - size]\n else:\n mm = [i for i in keys if len(i) == size]\n\n lal = {k: dic[k] for k in mm}\n d_rev = OrderedDict(sorted(lal.items(), key=itemgetter(1), reverse=True))\n\n d_keys = list(d_rev)\n d_values = list(d_rev.values())\n\n # we take the complement of the set\n # adding. we take biggest\n if adding:\n d_keys_comp =[]\n for key in range(len(d_keys)):\n comp = set(np.arange(nodenum)) - set(d_keys[key])\n d_keys_comp.append(comp)\n\n return d_keys_comp[:5], d_values[:5]\n\n # removing, we take smallest, removing hurts the accuracy\n else: #best to remove\n return d_keys[:5], d_values[:5]\n\n\ndef oracle_get(dic, param, rank):\n # oracle\n print(\"\\nOracle adding\\n\") # adding means its loss is the biggest when removing just one\n good=0; all=0\n for o in range(1, 6):\n # get the best set of size o\n set_oracle, val_oracle = oracle(dic, o, param.shape[0], True)\n print(f\"\\nOracle best to add for {param.shape[0] - o}:\")\n print(set_oracle, val_oracle)\n ora = set(list(set_oracle[:o][0]))\n ran = set(rank[:o])\n inter = ora.intersection(ran)\n good+=len(inter); all+=len(ran)\n print(f\"Acc add: {good/float(all)}\")\n print(\"\\nOracle removing\\n\")\n good = 0; all = 0\n for o in range(1, 6):\n set_oracle, val_oracle = oracle(dic, o, param.shape[0], False)\n print(f\"\\nOracle vest to remove for {o}:\")\n print(set_oracle, val_oracle)\n ora = set(list(set_oracle[:o][0]))\n ran = set(rank[-o:])\n inter = ora.intersection(ran)\n good += len(inter);\n all += len(ran)\n print(f\"Acc remov: {good / float(all)}\")\n\n\ndef shapley_rank(evaluate, net, net_name, checkpoint_name, dataset, file_load, k_num, method, sample_num, adding, layer=\"None\", criterion=\"dummy\", args=None, path=None):\n path_file = \"sv/Lenet/combin\"\n print(\"Computing Shapley rank in two stages\")\n print(f\"Shapley method: {method}\")\n # just to check the original accuracy without any pruning\n if net_name==\"Resnet50\":\n acc = 76.13\n #acc = evaluate(dataset, net, criterion, args) # datset is val_laoder\n elif net_name==\"resnet\": #from grad_drop\n acc = evaluate(net, dataset)\n else:\n acc = evaluate(net, \"test\")\n # compute combinations/ characteristic function\n if path is None:\n path=f\"../methods/sv/{net_name}/{method}\"\n os.makedirs(path, exist_ok=True)\n shap_ranks=[]; shap_ranks_dic = {}\n\n for layer_name, param in net.named_parameters():\n # for a particular layer indicated in args\n if layer != \"None\":\n if layer==layer_name:\n pass\n else:\n continue\n print(layer_name)\n\n if \"weight\" in layer_name and \"bn\" not in layer_name and \"out\" not in layer_name:\n print(\"Lay\")\n #if \"weight\" in layer_name and \"bn\" not in layer_name and \"out\" not in layer_name and \"fc\" in layer_name: #remove after cvpr2022\n if not \"Resnet\" in net_name or (\"Resnet\" in net_name and (\"layer\" in layer_name or \"fc\" in layer_name or layer_name==\"module.conv1.weight\")):\n #if not \"Resnet\" in net_name or (\"Resnet\" in net_name and \"fc\" in 
layer_name):\n\n print(\"Layer: \", layer_name)\n global file_name, file_name_new, file_name_old\n file_name = f\"{path}/{method}_pruning_{checkpoint_name}_{layer_name}\"\n file_name_new = file_name + \"_new.txt\"\n file_old = file_name + \".txt\"\n file_name_old = file_name + \".txt\"\n if not os.path.isfile(file_name_old):\n with open(file_name_old, \"a+\") as f:\n f.write((str(param.shape[0])+\"\\n\"))\n\n if method == \"kernel\":\n if not file_load: # kernshap writes the results to file\n shap_arr = kernshap(True, net, net_name, layer_name, evaluate, dataset, k_num, param, sample_num, \"zeroing\", args, criterion)\n dic, nodes_num = readdata_notsampled(file_old, acc)\n print(f\"Read from {file_old}\")\n print(f\"Number of samples: {len(dic.keys())}\")\n\n reg = LinearRegression().fit(list(dic.keys())[1:], list(dic.values())[1:])\n shap_arr = reg.coef_\n shap_arr=-1*shap_arr\n print(\"shaps\\n\", shap_arr)\n\n if method == \"random\":\n if not file_load:\n randomshap(True, net, net_name, checkpoint_name, layer_name, evaluate, dataset, k_num, param, sample_num, \"zeroing\")\n shap_arr = readdata_notsampled_random(file_old, acc)\n print(\"shaps\\n\", shap_arr)\n #shap_arr = file_read(\"random\", net_name, checkpoint_name, layer_name)\n\n if method == \"combin\":\n if not file_load:\n compute_combinations_lenet(True, net, net_name, layer_name, evaluate, dataset, k_num, \"zeroing\")\n dic, nodes_num = readdata_notsampled_combin(file_name_new, acc)\n #k_num=1; adding=False\n print(f\"\\nExact partial for {k_num} and adding: {adding}\")\n shap_arr = exact_partial(dic, nodes_num, acc, adding, k_num)\n la=np.argsort(shap_arr)[::-1]\n print(\",\".join(map(str, la)))\n\n shap_rank = np.argsort(shap_arr)[::-1]\n print(shap_rank)\n shap_ranks.append(shap_rank)\n shap_ranks_dic[layer_name]=shap_rank\n\n #get oracle\n file_name = f\"../methods/sv/{net_name}/combin/combin_pruning_{checkpoint_name}_{layer_name}\"\n file_name_new = file_name + \"_new.txt\"\n\n\n # compute the intersection of the rank selected above and the oracle set\n # maybe commented on the server\n if os.path.isfile(file_name_new):\n dic, nodes_num = readdata_notsampled_combin(file_name_new, acc)\n oracle_get(dic, param, shap_rank)\n\n\n return shap_ranks, shap_ranks_dic\n\n\n# def file_read_npy(meth, net_name, checkpoint_name, layer):\n# if meth==\"random\":\n# samples_most=0\n# for fname in os.listdir(f'../methods/sv/{net_name}/{meth}'):\n# core_name = f\"{meth}shap_{checkpoint_name}_{layer}_samp_\"\n# if core_name in fname:\n# samp_num_temp = fname.replace(core_name, \"\")\n# samp_num = samp_num_temp.replace(\".npy\", \"\")\n# samples_num = int(samp_num)\n# if samples_num>samples_most:\n# samples_most = samples_num\n# #loading file\n# path_meth = f\"../methods/sv/{net_name}/{meth}/{meth}shap_{checkpoint_name}_{layer}_samp_{samples_most}.npy\"\n# randsvs = np.load(path_meth)\n# print(f\"Loaded {meth} Shapley file from {path_meth}\")\n# return randsvs\n\n\ndef file_check(method):\n if method==\"combin\":\n # check if new results have more lines than the previous one\n file_old = file_name + \".txt\"\n file_new = file_name + \"_new.txt\"\n if os.path.exists(file_old):\n num_lines_old = sum(1 for line in open(file_old, \"r\"))\n num_lines_new = sum(1 for line in open(file_new, \"r\"))\n # if num_lines_old > num_lines_new:\n # os.remove(file_new)\n # else:\n # os.remove(file_old)\n # os.rename(file_new, file_old)\n else:\n os.rename(file_new, file_old)\n\n\n# taken form 
ranking/results_compression/lenet_network_pruning_withcombinations.py\ndef compute_combinations_lenet(file_write, net, net_name, layer, evaluate, dataset, k_num, perturbation_method):\n print(\"1. Computing combinations\")\n\n acc = evaluate(net, \"test\")\n print(\"from other\")\n # for name, param in net.named_parameters():\n # print(name)\n for name, param in net.named_parameters():\n print(\"Working on the layer: \", layer)\n # find a layer (weight and bias) where we compute rank\n\n if layer in name:\n if file_write:\n with open(file_name_new, \"a+\") as textfile:\n textfile.write(str(param.shape[0])+\"\\n\")\n if \"Resnet\" not in net_name:\n layerbias = layer[:-6] + \"bias\" #:3 for lenet\n params_bias = net.state_dict()[layerbias]\n all_results = {}\n # get s and r to compute the (s choose r)\n s = torch.arange(0, param.shape[0]) # list from 0 to 19 as these are the indices of the data tensor\n # get the alternating elements in the channel list to have the most combinations from the beginning and end first\n a = np.arange(0, param.shape[0]+1)\n channel_list = [a[-i // 2] if i % 2 else a[i // 2] for i in range(len(a))]\n channel_list=channel_list[:] if k_num==None else channel_list[:k_num]\n #for r in range(1, param.shape[0]): # produces the combinations of the elements in s\n for r in channel_list:\n print(r)\n results = []\n for combination in list(combinations(s, r)):\n combination = torch.LongTensor(combination)\n #print(combination)\n # save current values in a placeholder\n params_saved = param[combination].clone();\n if \"Resnet\" not in net_name:\n param_bias_saved = params_bias[combination].clone()\n # zero out a subset of the channels\n if perturbation_method == \"zeroing\":\n\n\n ## param[combination] = 0\n ## if net_name is not \"Resnet\":\n ## params_bias[combination] = 0\n ## accuracy = evaluate(net, \"val\")\n\n # add noise to subset of channels (experimental feature)\n # elif perturbation_method == \"additive_noise\":\n # # norm_dist=torch.distributions.Normal(0,0.1)\n # # param[combination[0]] += norm_dist.sample(param[combination[0]].shape).to(device)\n # # multiplying by noise\n # # norm_dist = torch.distributions.Normal(1, 0.1)\n # # param[combination[0]] *= norm_dist.sample(param[combination[0]].shape)\n # # adding noise\n # accuracies = []\n # for i in range(5):\n # norm_dist = torch.distributions.Normal(0, 0.1)\n # param[combination[0]] += norm_dist.sample(param[combination[0]].shape)\n # accuracies.append(evaluate())\n # accuracy = np.mean(accuracies)\n # print(\"Averaged accuracy: \", accuracy)\n ########################################333\n # accuracy = evaluate(net)\n\n ##param.data[combination] = params_saved\n ##if net_name is not \"Resnet\":\n ## params_bias.data[combination] = param_bias_saved\n\n accuracy = check_combination(net, net_name, combination, param, evaluate, params_bias)\n\n\n results.append((combination, accuracy))\n # write the combinations to the file\n if file_write:\n with open(file_name_new, \"a+\") as textfile:\n textfile.write(\"%s: %.2f\\n\" % (\",\".join(str(x) for x in combination.numpy()), accuracy))\n\n all_results[r] = results\n file_check(\"combin\")\n\n\ndef exact_partial(dic, nodesNum, original_acc, adding, K_param=0):\n #minus means actually plus because we remove it from the list of 0s, so add to the list of non-zeros\n #dic[tuple(np.arange(nodesNum))] = 10 #random accuracy of no all zeros\n dic[()] = original_acc\n m = list(dic.keys())\n m.sort(key=lambda t: len(t), reverse=True)\n\n shaps = np.zeros(nodesNum)\n 
shaps_samps = np.zeros(nodesNum)\n N = nodesNum\n for elem in m:\n val1 = dic[elem]\n #print(\"el: \", elem, \"val: \", val1)\n if len(elem) == 1:\n mama = 0\n elem_set = set(elem)\n if adding:\n thresh = (nodesNum - K_param)\n else:\n thresh = K_param\n for i in elem:\n elem_set.remove(i)\n elem_plus = tuple(elem_set)\n K = len(elem_plus)\n if (K>=thresh and adding) or (K+1<=thresh and not adding):\n #if K >1 and tuple(elem_plus) in m:\n if tuple(elem_plus) in m:\n val2 = dic[elem_plus]-val1\n # elif len(elem_plus)==1:\n # val2 = dic[elem_plus]\n #print(print(\"i: \", i, \"val2: \", val2, \"el: \", elem_plus, \"val: \", dic[elem_plus]))\n\n coeff = np.math.factorial(N-K-1)*np.math.factorial(K)\n shaps[i]+=val2*coeff\n shaps_samps[i]+=1\n elem_set.add(i)\n\n svs = np.divide(shaps, np.math.factorial(N))\n print(\"svs\", svs)\n return svs\n\n\n\ndef check_combination(net, net_name, combination, param, evaluate, params_bias, args=None, criterion=None, loader=None):\n combination = torch.LongTensor(combination)\n print(combination)\n params_saved = param[combination].clone()\n if \"Resnet\" not in net_name and \"resnet\" not in net_name:\n param_bias_saved = params_bias[combination].clone()\n\n #param[combination[0]] = 0\n param.data[combination] = 0\n #print(\"Sum:\\n \", torch.sum(param, axis=(1, 2, 3)))\n if \"Resnet\" not in net_name and \"resnet\" not in net_name:\n params_bias[combination] = 0\n\n if net_name is not \"Resnet50\" and \"resnet\" not in net_name: #resnet50\n accuracy = evaluate(net, \"val\")\n elif \"resnet\" in net_name:\n accuracy = evaluate(net, loader)\n else:\n accuracy = evaluate(loader, net, criterion, args)\n\n param.data[combination] = params_saved\n if \"Resnet\" not in net_name and \"resnet\" not in net_name:\n params_bias.data[combination] = param_bias_saved\n\n return accuracy\n\ndef write_file(file_write, comb, acc):\n if file_write:\n with open(file_name_old, \"a+\") as textfile:\n textfile.write(\"%s: %.2f\\n\" % (\",\".join(str(x) for x in comb), acc))\n print(f\"Saved in {file_name_old}\")\n\ndef kernshap(file_write, net, net_name, layer, evaluate, dataset, k_num, param, samples_num=10, perturbation_method=None, args=None, criterion=None):\n\n if \"Resnet\" not in net_name and \"resnet\" not in net_name:\n layerbias = layer[:-6] + \"bias\" #:3 for lenet\n params_bias = net.state_dict()[layerbias]\n else:\n params_bias = None\n\n # if file_write:\n # with open(file_name, \"a+\") as textfile:\n # textfile.write(str(param.shape[0])+\"\\n\")\n\n combinations_bin = np.zeros((samples_num, param.shape[0]))\n accuracies = np.zeros(samples_num)\n for i in range(samples_num):\n print(f\"samp: {i}\")\n randperm = np.random.permutation(param.shape[0])\n randint = 0\n while (randint == 0):\n randint = np.random.randint(param.shape[0])\n randint_indextoremove = np.random.randint(randint)\n combination = randperm[:randint]\n combination2 = np.delete(combination, randint_indextoremove)\n print(combination[randint_indextoremove])\n\n acc = check_combination(net, net_name, combination, param, evaluate, params_bias, args, criterion, dataset)\n\n\n combinations_bin[i, combination] = 1\n accuracies[i]=acc\n\n write_file(file_write, combinations_bin[i], accuracies[i])\n\n #file_check()\n\n dumm=1\n return\n\n\ndef randomshap(file_write, net, net_name, checkpoint_name, layer, evaluate, dataset, k_num, param, samples_num=10,\n perturbation_method=None):\n if \"Resnet\" not in net_name :\n layerbias = layer[:-6] + \"bias\" #:3 for lenet\n params_bias = 
net.state_dict()[layerbias]\n else:\n params_bias = None\n\n acc_val = evaluate(net, \"val\")\n\n shaps = np.zeros(param.shape[0])\n combinations_bin = np.zeros((samples_num, param.shape[0]))\n accuracies = np.zeros(samples_num)\n for i in range(samples_num):\n print(f\"\\nSample num: {i}\")\n randperm = np.random.permutation(param.shape[0])\n last_acc = acc_val\n nums = []; marginals = [];\n for j in range(param.shape[0]):\n elem = randperm[j]\n print(f\"\\n\\nChannel marginal check: {elem}\")\n combination = randperm[:j+1]\n acc = check_combination(net, net_name, combination, param, evaluate, params_bias)\n marginal = last_acc - acc\n last_acc = acc\n shaps[elem]+= marginal\n\n nums.append(combination); marginals.append(acc)\n for k in range(len(nums)):\n write_file(file_write, nums[k], marginals[k])\n\n if i % 10 == 0 or i==samples_num-1:\n print(shaps)\n randsvs = shaps/(i+1)\n print(randsvs)\n print(np.argsort(randsvs)[::-1])\n #np.save(f\"../methods/sv/{net_name}/random/randomshap_{checkpoint_name}_{layer}_samp_{(i+1)}.npy\", randsvs)\n return randsvs\n\n\n\n# CHOOSES RANDOM COMBINATION and then removed one of the random nodes and computes accuracy for that node\n# from ranking/results_compression/network_pruning_withcombinstions.py\ndef compute_combinations_random(file_write, net, evaluate):\n for name, param in net.named_parameters():\n print(name)\n print(param.shape)\n layer = \"c5.weight\"\n # find a layer (weight and bias) where we compute rank\n\n if layer in name:\n layerbias = layer[:3] + \"bias\"\n params_bias = net.state_dict()[layerbias]\n while (True):\n\n all_results = {}\n # s=torch.range(0,49) #list from 0 to 19 as these are the indices of the data tensor\n # for r in range(1,50): #produces the combinations of the elements in s\n # results=[]\n randperm = np.random.permutation(param.shape[0])\n randint = 0\n while (randint == 0):\n randint = np.random.randint(param.shape[0])\n randint_indextoremove = np.random.randint(randint)\n combination = randperm[:randint]\n combination2 = np.delete(combination, randint_indextoremove)\n print(combination[randint_indextoremove])\n\n if file_write:\n with open(\"results_running/combinations_pruning_mnist_%s_%s.txt\" % (path[7:], layer), \"a+\") as textfile:\n textfile.write(\"%d\\n\" % randint_indextoremove)\n for combination in [combination, combination2]:\n # for combination in list(combinations(s, r)):\n combination = torch.LongTensor(combination)\n print(combination)\n params_saved = param[combination].clone()\n param_bias_saved = params_bias[combination].clone()\n # param[torch.LongTensor([1, 4])] = 0\n # workaround, first using multiple indices does not work, but if one of the change first then it works to use param[combinations]\n if len(combination) != 0:\n param[combination[0]] = 0\n # param[combination]=0\n params_bias[combination] = 0\n accuracy = evaluate()\n param.data[combination] = params_saved\n params_bias.data[combination] = param_bias_saved\n if file_write:\n with open(\"results_running/combinations_pruning_fashionmnist_%s_%s.txt\" % (path[7:], layer),\n \"a+\") as textfile:\n textfile.write(\"%s: %.2f\\n\" % (\",\".join(str(x) for x in combination.numpy()), accuracy))\n\n # all_results[r]=results\n\n # import pickle\n # filename='combinations_all_results_rel_bn_%d.pkl' % r\n # file=open(filename, 'wb')\n # pickle.dump(all_results, file)\n # file.close()\n\n\n#############################################3\n# copied from ranking/results_shapley/shapley.py\n\n# READ ONLY DATA\n# not sampled, we take all the 
combinations of size 1, then all the combinations of size 2, etc.\n\n# reads into dic 0,6 : 98.51\n# 6: 98.82\n# 7: 98.17\n# 8: 98.57\n# 9: 99.02\n# 0,1: 97.65\n# 0,2: 98.83\n# 0,3: 98.63\n# 0,4: 98.80\n\n\ndef readdata_notsampled_marginals(file, original_accuracy):\n f = open(file)\n dict = {(): 0}\n nodes_num = int(next(f)[:-1]) # number of points, first line of the file only\n shap=np.zeros(nodes_num)\n for i in range(nodes_num):\n dict[i]=[]\n for line in f:\n linesplit = line.strip().split(\":\")\n tup = int(linesplit[0])\n acc = float(linesplit[1])\n #dict[tup] = original_accuracy - acc\n dict[tup].append(acc)\n #print(tup, acc)\n f.close()\n for m in range(nodes_num):\n shap[m]=np.average(dict[m])\n return shap\n\n\ndef readdata_notsampled(file, original_accuracy):\n f = open(file)\n nodes_num = next(f)[:-1] # number of points, first line of the file only\n #line = next(f)\n #linesplit = line.strip().split(\":\")\n #original_accuracy2 = float(linesplit[1])\n dict = {(): original_accuracy}\n for line in f:\n\n\n\n #print(line)\n linesplit = line.strip().split(\":\")\n #try:\n tup = tuple(int(float(i) )for i in linesplit[0].split(\",\"))\n #except:\n # lala=7\n acc = float(linesplit[1])\n #dict[tup] = original_accuracy - acc\n dict[tup]=acc\n #print(tup, acc)\n f.close()\n return dict, int(nodes_num)\n\ndef readdata_notsampled_combin(file, original_accuracy):\n f = open(file)\n nodes_num = next(f)[:-1] # number of points, first line of the file only\n line = next(f)\n linesplit = line.strip().split(\":\")\n original_accuracy2 = float(linesplit[1])\n dict = {(): original_accuracy2}\n for line in f:\n #print(line)\n\n linesplit = line.strip().split(\":\")\n #try:\n tup = tuple(int(float(i) )for i in linesplit[0].split(\",\"))\n #except:\n # lala=7\n acc = float(linesplit[1])\n #dict[tup] = original_accuracy - acc\n dict[tup]=acc\n #print(tup, acc)\n f.close()\n return dict, int(nodes_num)\n\n\ndef get_svs(dict, original_accuracy, nodes_num):\n shaps=np.zeros(nodes_num)\n for key in dict.keys():\n keyl = list(key)\n if len(keyl)==0:\n #shaps[keyl[-1]]=original_accuracy-dict[key]\n old_key_val=dict[key]\n else:\n shaps[keyl[-1]]=old_key_val-dict[key]\n old_key_val=dict[key]\n return shaps\n\n\ndef readdata_notsampled_random(file, original_accuracy):\n f = open(file)\n nodes_num = int(next(f)[:-1]) # number of points, first line of the file only\n #line = next(f)\n #linesplit = line.strip().split(\":\")\n #original_accuracy2 = float(linesplit[1])\n i=0\n dict = {(): original_accuracy}\n shaps=np.zeros(nodes_num)\n samps=0\n for line in f:\n i+=1\n linesplit = line.strip().split(\":\")\n tup = tuple(int(float(i) )for i in linesplit[0].split(\",\"))\n acc = float(linesplit[1])\n #dict[tup] = original_accuracy - acc\n dict[tup]=acc\n #print(tup, acc)\n if i == int(nodes_num):\n i=0\n samps+=1\n # compute the difference in a permuattion from removing more and mroe nodes\n shaps_part = get_svs(dict, original_accuracy, nodes_num)\n shaps+=shaps_part\n dict = {(): original_accuracy}\n\n shaps=shaps/samps\n f.close()\n return shaps\n\n\n#######################\n\n# SHAPLEY VALUE\n\n###########################################################\n# copied from ranking/results_shapley/shapley.py\n\n# sampled shapley, \"full\" perms\n# (in quotes because we may not have computed all the perms, but we compute them sequentially\n# to get all of them, e.g. 
all perms of size 1, all perms of size 2, etc\n\n# for each node we want to compute Shapley value:\n# we get a random permutation and find that node (we count the subset from the beginning up to that node)\n# remove it and chceck the difference if both the subsets are present\n\n# works on such dics\n# 8: 98.57\n# 9: 99.02\n# 0,1: 97.65\n# 0,2: 98.83\n# 0,3: 98.63\n\ndef shapley_samp(dict_passed, nodesnum, samples_num):\n print(\"Partial Random Shapley\")\n dict = dict_passed\n\n # permutations = list(itertools.permutations(elements))\n shap_array = []\n elements_num = nodesnum\n for elem in range(elements_num): # for each element we want to compute SV of\n sum = 0\n dict_elems = 0\n print(elem)\n for i in range(samples_num):\n perm = np.random.permutation(elements_num).tolist()\n # print(perm)\n # we look at all the permutations\n ind = perm.index(elem)\n del perm[ind + 1:]\n perm.sort()\n perm_tuple = tuple(perm)\n perm.remove(elem)\n removed_perm_tuple = tuple(perm)\n if perm_tuple in dict and removed_perm_tuple in dict:\n val = dict[perm_tuple] - dict[removed_perm_tuple]\n sum += val\n # print(val)\n dict_elems += 1\n # print(\"sum: %.2f, perms: %d\" % (sum,dict_elems))\n shap = sum / dict_elems\n print(\"shap: %.2f\" % shap)\n shap_array.append(shap)\n\n return shap_array\n","repo_name":"kamadforge/dirichlet_pruning","sub_path":"methods/shapley_rank.py","file_name":"shapley_rank.py","file_ext":"py","file_size_in_byte":26665,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"}
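`exact_partial` above weights each marginal contribution by the classic Shapley coefficient |S|! * (N-|S|-1)! / N!. A self-contained check of that formula on a tiny additive game, where the Shapley values are known in closed form:

```python
from itertools import combinations
from math import factorial

def shapley_exact(value, n):
    """value: dict mapping frozenset of players to payoff; n: player count."""
    shap = [0.0] * n
    for i in range(n):
        others = [p for p in range(n) if p != i]
        for r in range(n):
            for s in combinations(others, r):
                S = frozenset(s)
                w = factorial(len(S)) * factorial(n - len(S) - 1) / factorial(n)
                shap[i] += w * (value[S | {i}] - value[S])
    return shap

# Additive game: player i alone contributes i + 1, so the SVs must be [1, 2, 3].
v = {frozenset(s): sum(p + 1 for p in s)
     for r in range(4) for s in combinations(range(3), r)}
print(shapley_exact(v, 3))  # [1.0, 2.0, 3.0]
```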
+{"seq_id":"25326682812","text":"import pygame, sys, time, random\r\n\r\npygame.init()\r\nfps = 60\r\nxo = 'x'\r\nwidth = 400\r\nheight = 400\r\ngame_icon = pygame.image.load(\"images/game_icon.png\")\r\npygame.display.set_icon(game_icon)\r\nwinner = None\r\ndraw = False\r\nwhite = (255, 255, 255)\r\nline_color = (0, 0, 0)\r\nclock = pygame.time.Clock()\r\nttt = [[None] * 3, [None] * 3, [None] * 3]\r\nmode = None\r\n\r\npygame.display.set_caption('Tic Tac Toe')\r\nscreen = pygame.display.set_mode((width, height + 100), 0, 32)\r\n\r\nx_img = pygame.image.load('images/x.png')\r\no_img = pygame.image.load('images/o.png')\r\nhome_img = pygame.image.load('images/home.png')\r\nsinglePlayer = pygame.image.load('images/singlePlayer.png')\r\nmultiPlayer = pygame.image.load('images/multiPlayer.png')\r\n\r\nx_img = pygame.transform.scale(x_img, (80, 80))\r\no_img = pygame.transform.scale(o_img, (80, 80))\r\nhome_img = pygame.transform.scale(home_img, (width, height + 100))\r\nsinglePlayer = pygame.transform.scale(singlePlayer, (200, 200))\r\nmultiPlayer = pygame.transform.scale(multiPlayer, (170, 170))\r\n\r\n\r\ndef home_screen():\r\n global mode\r\n\r\n screen.blit(home_img, (0, 0))\r\n pygame.display.update()\r\n time.sleep(1)\r\n screen.fill(white)\r\n pygame.draw.line(screen, line_color, (0, 250), (width, 250), 7)\r\n screen.blit(singlePlayer, (100, 0))\r\n screen.blit(multiPlayer, (100, 300))\r\n pygame.display.update()\r\n\r\n\r\ndef main_screen():\r\n screen.fill(white)\r\n pygame.draw.line(screen, line_color, (width / 3, 0), (width / 3, height), 7)\r\n pygame.draw.line(screen, line_color, (width / 3 * 2, 0), (width / 3 * 2, height), 7)\r\n\r\n pygame.draw.line(screen, line_color, (0, height / 3), (width, height / 3), 7)\r\n pygame.draw.line(screen, line_color, (0, height / 3 * 2), (width, height / 3 * 2), 7)\r\n status()\r\n\r\n\r\ndef status():\r\n global draw\r\n if winner is None:\r\n message = xo.upper() + \"'s Turn\"\r\n else:\r\n message = winner.upper() + \" won !\"\r\n if draw:\r\n message = \"Game drawn !\"\r\n font = pygame.font.Font(None, 30)\r\n text = font.render(message, 1, (255, 255, 255))\r\n screen.fill((0, 0, 0), (0, 400, 500, 100))\r\n text_rect = text.get_rect(center=(width / 2, 500 - 50))\r\n screen.blit(text, text_rect)\r\n pygame.display.update()\r\n\r\n\r\ndef check_win():\r\n global ttt, winner, draw\r\n\r\n for row in range(0, 3):\r\n if ttt[row][0] == ttt[row][1] and ttt[row][1] == ttt[row][2] and ttt[row][0] is not None:\r\n winner = ttt[row][0]\r\n pygame.draw.line(screen, (250, 0, 0), (0, (row + 1) * height / 3 - height / 6), \\\r\n (width, (row + 1) * height / 3 - height / 6), 4)\r\n break\r\n\r\n for col in range(0, 3):\r\n if (ttt[0][col] == ttt[1][col] == ttt[2][col]) and (ttt[0][col] is not None):\r\n # this column won\r\n winner = ttt[0][col]\r\n # draw winning line\r\n pygame.draw.line(screen, (250, 0, 0), ((col + 1) * width / 3 - width / 6, 0), \\\r\n ((col + 1) * width / 3 - width / 6, height), 4)\r\n break\r\n if (ttt[0][0] == ttt[1][1] == ttt[2][2]) and (ttt[0][0] is not None):\r\n # game won diagonally left to right\r\n winner = ttt[0][0]\r\n pygame.draw.line(screen, (250, 70, 70), (50, 50), (350, 350), 4)\r\n if (ttt[0][2] == ttt[1][1] == ttt[2][0]) and (ttt[0][2] is not None):\r\n # game won diagonally right to left\r\n winner = ttt[0][2]\r\n pygame.draw.line(screen, (250, 70, 70), (350, 50), (50, 350), 4)\r\n if all([all(row) for row in ttt]) and winner is None:\r\n draw = True\r\n status()\r\n\r\n\r\ndef reset():\r\n global ttt, winner, draw, 
xo\r\n time.sleep(3)\r\n xo = 'x'\r\n winner = None\r\n draw = False\r\n home_screen()\r\n ttt = [[None] * 3, [None] * 3, [None] * 3]\r\n\r\n\r\ndef draw_xo(row, col):\r\n global ttt, xo, posx, posy\r\n if row == 1:\r\n posy = 30\r\n if row == 2:\r\n posy = width / 3 + 30\r\n if row == 3:\r\n posy = width / 3 * 2 + 30\r\n if col == 1:\r\n posx = 30\r\n if col == 2:\r\n posx = height / 3 + 30\r\n if col == 3:\r\n posx = height / 3 * 2 + 30\r\n ttt[row - 1][col - 1] = xo\r\n if xo == 'x':\r\n screen.blit(x_img, (posx, posy))\r\n xo = '0'\r\n else:\r\n screen.blit(o_img, (posx, posy))\r\n xo = 'x'\r\n pygame.display.update()\r\n\r\n\r\ndef user_click():\r\n x, y = pygame.mouse.get_pos()\r\n if x < width / 3:\r\n col = 1\r\n print('h')\r\n elif x < (width / 3) * 2:\r\n col = 2\r\n elif x < width:\r\n col = 3\r\n else:\r\n col = None\r\n if y < height / 3:\r\n row = 1\r\n elif y < (height / 3) * 2:\r\n row = 2\r\n elif y < height:\r\n row = 3\r\n else:\r\n row = None\r\n if row and col and ttt[row - 1][col - 1] is None:\r\n global xo\r\n # draw the x or o on screen\r\n draw_xo(row, col)\r\n check_win()\r\n\r\n\r\ndef user_click_single():\r\n x, y = pygame.mouse.get_pos()\r\n col = 0\r\n if x < width / 3:\r\n col = 1\r\n elif x < (width / 3) * 2:\r\n col = 2\r\n elif x < width:\r\n col = 3\r\n else:\r\n col = None\r\n if y < height / 3:\r\n row = 1\r\n elif y < (height / 3) * 2:\r\n row = 2\r\n elif y < height:\r\n row = 3\r\n else:\r\n row = None\r\n if row and col and ttt[row - 1][col - 1] is None:\r\n global xo\r\n # draw the x or o on screen\r\n draw_xo(row, col)\r\n check_win()\r\n a = random.randrange(1, 4)\r\n b = random.randrange(1, 4)\r\n while ttt[a - 1][b - 1] is not None:\r\n a = random.randrange(1, 4)\r\n b = random.randrange(1, 4)\r\n draw_xo(a, b)\r\n check_win()\r\n\r\n\r\nhome_screen()\r\n\r\nrun = True\r\nwhile run:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n main_screen()\r\n x, y = pygame.mouse.get_pos()\r\n if y > 250:\r\n main_screen()\r\n while True:\r\n for e in pygame.event.get():\r\n if e.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif e.type == pygame.MOUSEBUTTONDOWN:\r\n user_click()\r\n if winner or draw:\r\n reset()\r\n pygame.display.update()\r\n clock.tick(fps)\r\n if y < 250:\r\n main_screen()\r\n while True:\r\n for f in pygame.event.get():\r\n print(f.type == pygame.MOUSEBUTTONDOWN)\r\n if f.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif f.type == pygame.MOUSEBUTTONDOWN:\r\n user_click_single()\r\n if winner or draw:\r\n reset()\r\n\r\n pygame.display.update()\r\n clock.tick(fps)\r\n","repo_name":"sreshtha10/tictactoeGUI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
+{"seq_id":"25401696152","text":"import random\nfrom tkinter import *\n\nclass Dungeon(object):\n def __init__(self, RoomWidth, RoomHeight):\n self.RoomWidth = RoomWidth\n self.RoomHeight = RoomHeight\n def Resolution(self):\n return \"resolution: %s,%s\" % (self.RoomWidth, self.RoomHeight)\n\nclass Window(object):\n def Create(width, height):\n WindowWidth = width\n WindowHeight = height\n BackGround = 'Khaki'\n _Window = Canvas(width = WindowWidth, height = WindowHeight, bg = BackGround)\n _Window.pack()\n return _Window\n\nif __name__ == \"__main__\":\n width = 600\n height = 600\n WorkSpace = Window.Create(width,height)\n\n \n Room = []\n for num in range(random.randint(10, 50)):\n Room.append(num)\n Room[num] = Dungeon(random.randint(5,50), random.randint(5,50))\n print(\"Room #%s, %s\"% (num, Room[num].Resolution()))\n WorkSpace.create_rectangle(width/2-Room[num].RoomWidth, height/2-Room[num].RoomHeight,\n width/2+Room[num].RoomWidth, height/2+Room[num].RoomHeight,\n outline = 'Blue')\n","repo_name":"ShakulaAndrew/DungeonMaker","sub_path":"DungeonMaker.py","file_name":"DungeonMaker.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"21115329101","text":"#Input : $python mutantSeq.py \n### ENSTtoAA.py output == per/line ::: ;;;;;\n\n#Output for each VCF line\n### \n\n\nimport sys\n\n##call in the VCF file\nvcfFile = sys.argv[1] ## VCF File\nenstFile = sys.argv[2] ## ENSTtoAA.py outputfile\n\nwith open(vcfFile,'r') as i:\n entries = i.readlines()\n\nwith open(enstFile,'r') as i:\n lines = i.readlines()\n\nparsedVCF = []\nfor i in entries:\n if i[0] != '#':\n parsedVCF.append(i)\n\n## Convert the ENSTtoAA.py to a Dictionary with the KEY == ENST ID\nenstDict = {}\nfor i in lines:\n units = i.split(';')\n enstDict[units[0]] = units[1:]\n\nsnpDict = {}\nfor i in parsedVCF:\n info = i.split('\\t')\n newkey = info[0] + ':' + info[1]\n snpDict[newkey] = info[5] ### DICT { : }\n\n### Goal : go through each KEY of the dictionary and find the SNP's with corresponding Chromosome and Location of the CDS regions\n\nfor snp in snpDict:\n print(snp)\n\n\n\n","repo_name":"yuq1993/IBP-SNPeffect","sub_path":"Scripts_Colton/mutantSeq.py","file_name":"mutantSeq.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"17090241756","text":"import logging\nimport posixpath\nfrom urllib.parse import urlparse\n\nimport boto3\nfrom botocore.client import Config\n\nfrom .base import BaseStorage\nfrom .types import FileInfo, FileMetadata, PresignedPostInfo, S3Object\n\nlogger = logging.getLogger(__name__)\n\n\n_config = None\n\n\ndef get_aws_session() -> boto3.Session:\n return boto3.Session()\n\n\ndef get_s3_public_file_url(region: str, bucket_name: str, path: str) -> str:\n return 'https://{name}.s3-{region}.amazonaws.com/{path}'.format(\n region=region, name=bucket_name, path=path)\n\n\nclass S3Storage(BaseStorage):\n \"\"\"Amazon S3 backed storage\n\n This is the recommended choice for production.\n \"\"\"\n\n def __init__(self, bucket_name: str, key_prefix: str = None) -> None:\n self.bucket_name = bucket_name\n self.key_prefix = key_prefix or ''\n\n @classmethod\n def from_url(cls, url): # type: (str) -> BaseStorage\n parsed = urlparse(url)\n if not parsed.netloc:\n raise ValueError(\n 'S3 bucket name missing. '\n 'Make sure the URL is in the form s3://bucket-name/key-prefix')\n return cls(parsed.netloc, key_prefix=parsed.path.lstrip('/'))\n\n def get_file(self, key: str) -> FileInfo:\n full_key = self._get_full_key(key)\n data = self._get_bucket().Object(full_key).get()\n return FileInfo(\n metadata=FileMetadata(\n content_type=data['ContentType']),\n content=data['Body'])\n\n def get_file_meta(self, key: str) -> FileMetadata:\n return self.get_file(key).metadata\n\n def get_file_content(self, key: str) -> bytes:\n return self.get_file(key).content\n\n def get_presigned_post(self, key: str, content_type: str) \\\n -> PresignedPostInfo:\n full_key = self._get_full_key(key)\n\n # s3 = get_aws_session().resource('s3')\n aws = get_aws_session()\n s3 = aws.client('s3', config=Config(signature_version='s3v4'))\n\n presigned_post = s3.generate_presigned_post(\n Bucket=self.bucket_name,\n Key=full_key,\n Fields={\"Content-Type\": content_type},\n Conditions=[\n # TODO: limit file size here\n {\"Content-Type\": content_type}\n ],\n ExpiresIn=3600)\n return PresignedPostInfo(\n url=presigned_post['url'],\n fields=presigned_post['fields'])\n\n def _get_bucket(self):\n aws = get_aws_session()\n s3 = aws.resource('s3')\n return s3.Bucket(self.bucket_name)\n\n def _get_full_key(self, key: str) -> str:\n return posixpath.join(self.key_prefix, key)\n\n def _get_object(self, key: str) -> S3Object:\n full_key = self._get_full_key(key)\n return self._get_bucket().Object(full_key)\n\n def put_file(self, key: str, data: bytes, mime_type: str = None) -> None:\n self._get_object(key).put(Body=data, ContentType=mime_type)\n\n def get_file_url(self, key: str) -> str:\n full_key = self._get_full_key(key)\n return ('s3://{bucket}/{key}'\n .format(bucket=self.bucket_name, key=full_key))\n\n def file_exists(self, key: str) -> bool:\n from botocore.exceptions import ClientError\n\n aws = get_aws_session()\n s3_client = aws.client('s3')\n try:\n s3_client.head_object(\n Bucket=self.bucket_name,\n Key=self._get_full_key(key))\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n return False\n raise\n return True\n\n def get_etag(self, key: str) -> str:\n return self._get_object(key).e_tag\n","repo_name":"rshk/mowaki-py","sub_path":"mowaki/storage/storage_s3.py","file_name":"storage_s3.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
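The presigned-POST flow in the record above pairs with a plain multipart upload on the client side. A minimal usage sketch (the bucket, key, and local file name are invented for illustration; the module path follows the record's metadata): S3 expects every field from the returned policy plus the file itself in one multipart/form-data POST.

import requests

from mowaki.storage.storage_s3 import S3Storage  # module path per the record metadata

# Bucket and file names here are hypothetical.
storage = S3Storage("my-example-bucket", key_prefix="uploads")
info = storage.get_presigned_post("report.txt", content_type="text/plain")

with open("report.txt", "rb") as fh:
    rsp = requests.post(
        info.url,
        data=info.fields,  # policy, signature, Content-Type, key, ...
        files={"file": ("report.txt", fh, "text/plain")},
    )
rsp.raise_for_status()  # S3 answers 204 No Content on success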
+{"seq_id":"11332172942","text":"#!/usr/bin/env python3\nimport rospy\nimport math\nimport sys\nimport os\nfrom yolov8_data.msg import Object, ObjectsMSG\nfrom yolov8_data.srv import *\nfrom sensor_msgs.msg import Image as msg_Image\nfrom sensor_msgs.msg import CameraInfo as msg_CameraInfo\nfrom sensor_msgs.msg import CompressedImage as msg_CompressedImage\nfrom sensor_msgs.msg import PointCloud2 as msg_PointCloud2\nfrom geometry_msgs.msg import *\nfrom nav_msgs.msg import *\nimport numpy as np\nimport cv2\nimport queue\nimport yaml\nfrom PIL import Image\nimport torch\nimport copy\nimport gc\nfrom signal import signal, SIGINT\n\nimport lap\nfrom cython_bbox import bbox_overlaps as bbox_ious\n\ntorch.cuda.empty_cache() \nos.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"garbage_collection_threshold:0.6,max_split_size_mb:128\"\n\nsys.path.append('/home/robolab/software/JointBDOE')\nfrom utils.torch_utils import select_device\nfrom utils.general import check_img_size, scale_coords, non_max_suppression\nfrom utils.datasets import LoadImages\nfrom models.experimental import attempt_load\nfrom utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective\nfrom ultralytics import YOLO\n\nimport time\nimport tf\nimport message_filters\nimport torch\n\nsys.path.append('/home/robolab/software/BOSCH-Age-and-Gender-Prediction/models')\nfrom base_block import FeatClassifier, BaseClassifier\nfrom resnet import resnet50\nfrom collections import OrderedDict\nimport torchvision.transforms as T\n\nclass yolov8():\n def __init__(self):\n self.image_queue = queue.Queue(1)\n self.objects_publisher = rospy.Publisher(\"/perceived_people\", ObjectsMSG, queue_size=10)\n self.objects_write = []\n self.objects_read = []\n self.camera_info = None\n\n self.depth_image = []\n self.color_image = []\n\n self.robot_world_transform_matrix = np.array([])\n self.robot_orientation = None\n self.camera_pose_respect_robot = np.array([[1, 0, 0, 0.21331892690256105],\n [0, 1, 0, 0.004864029093594846],\n [0, 0, 1, -0.9769708264898666],\n [0, 0, 0, 1]])\n\n\n self.age_range = [[10, 30], [30, 45], [45, 50], [50, 70]]\n normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n self.valid_tsfm = T.Compose([\n T.Resize((256, 192)),\n T.ToTensor(),\n normalize\n ])\n backbone = resnet50()\n classifier = BaseClassifier(nattr=35)\n self.age_classification_model = FeatClassifier(backbone, classifier)\n\n if torch.cuda.is_available():\n self.age_classification_model = torch.nn.DataParallel(self.age_classification_model).cuda()\n else:\n print(\"AGE CLASSIFICATION MODEL CAN'T BE EXECUTED WITH CUDA\")\n\n self.load_age_predictor_state_dict(self.age_classification_model)\n\n self.width = 640\n self.height = 480\n\n self.color_depth_ratio = None\n self.color_yolo_ratio_height = None\n self.color_yolo_ratio_width = None\n\n self.new_data = False\n\n # self.yolo_model_name = 'yolov8m-seg.pt'\n self.yolo_model_name = 'yolov8n-pose.engine'\n\n self.model_v8 = YOLO(self.yolo_model_name)\n\n self.device = select_device(\"0\", batch_size=1)\n self.model = attempt_load(\"/home/robolab/software/JointBDOE/runs/JointBDOE/coco_s_1024_e500_t020_w005/weights/best.pt\", map_location=self.device)\n self.stride = int(self.model.stride.max())\n with open(\"/home/robolab/software/JointBDOE/data/JointBDOE_weaklabel_coco.yaml\") as f:\n self.data = yaml.safe_load(f) # load data dict\n \n################# SUBSCRIBER CALLBACKS #################\n\n def store_data(self, rgb, depth, odom):\n # print(\"STORING 
DATA\")\n self.color_image = cv2.cvtColor(np.frombuffer(rgb.data, np.uint8).reshape(rgb.height, rgb.width, 4), cv2.COLOR_RGBA2RGB )\n self.depth_image = np.frombuffer(depth.data, np.float32).reshape(depth.height, depth.width, 1)\n\n euler_rotation = tf.transformations.euler_from_quaternion([odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w])\n self.robot_orientation = euler_rotation[2]\n self.robot_world_transform_matrix = np.array([[math.cos(euler_rotation[2]), -math.sin(euler_rotation[2]), 0, odom.pose.pose.position.x],\n [math.sin(euler_rotation[2]), math.cos(euler_rotation[2]), 0, odom.pose.pose.position.y],\n [0, 0, 1, odom.pose.pose.position.z],\n [0, 0, 0, 1]])\n self.new_data = True\n \n################# DATA OBTAINING #################\n\n def get_people_data(self, img, depth, robot_trans_matrix, robot_orientation):\n img0 = copy.deepcopy(img)\n img = letterbox(img, 640, stride=self.stride, auto=True)[0]\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n img = torch.from_numpy(img).to(self.device)\n img = img / 255.0 # 0 - 255 to 0.0 - 1.0\n \n if len(img.shape) == 3:\n img = img[None] # expand for batch dim\n\n # Make inference with both models\n init = time.time()\n out_ori = self.model(img, augment=True, scales=[1])[0]\n out_v8 = self.model_v8.predict(img0, classes=0, show_conf=True)\n print(\"EXPENDED TimE:\", time.time() - init)\n # # YOLO V8 data processing\n # if \"pose\" in self.yolo_model_name:\n # bboxes, confidences, poses = self.get_pose_data(out_v8, depth, robot_trans_matrix, img0)\n # else:\n # bboxes, confidences, poses = self.get_segmentator_data(out_v8, depth, robot_trans_matrix)\n\n # # Orientation model data processing\n\n # out = non_max_suppression(out_ori, 0.3, 0.5, num_angles=self.data['num_angles'])\n # orientation_bboxes = scale_coords(img.shape[2:], out[0][:, :4], img0.shape[:2]).cpu().numpy().astype(int) # native-space pred\n # orientations = (out[0][:, 6:].cpu().numpy() * 360) - 180 # N*1, (0,1)*360 --> (0,360)\n \n # # Hungarian algorithm for matching people from segmentation model and orientation model\n\n # matches = self.associate_orientation_with_segmentation(orientation_bboxes, bboxes)\n\n # # aux_objects_write = []\n # # for match in matches:\n # # act_object = Object()\n # # act_object.type = 0\n # # act_object.left = int(segmentation_bboxes[match[1]][0])\n # # act_object.top = int(segmentation_bboxes[match[1]][1])\n # # act_object.right = int(segmentation_bboxes[match[1]][2])\n # # act_object.bot = int(segmentation_bboxes[match[1]][3])\n # # act_object.score = segmentation_confidences[match[1]]\n # # act_object.orientation = orientations[match[0]]\n # # aux_objects_write.append(act_object)\n # # bytetrack_srv_proxy = rospy.ServiceProxy('bytetrack_srv', ObjectsSRV)\n # # try:\n # # aux_objects_read = bytetrack_srv_proxy(aux_objects_write).res\n # # except rospy.ServiceException as e:\n # # print(\"Service call failed: %s\"%e)\n # # ret_bboxes = [[person.left if person.left > 0 else 0, person.top if person.top > 0 else 0, person.right if person.right < self.width else self.width - 1, person.bot if person.bot < self.height else self.height - 1] for person in aux_objects_read]\n # # ret_scores = [person.score for person in aux_objects_read]\n # # ret_orientations = [person.orientation for person in aux_objects_read]\n\n # associated_orientations = []\n # for i in range(len(matches)):\n # for j in range(len(matches)):\n # if i == 
matches[j][1]:\r\n # transformed_pose = tf.transformations.quaternion_from_euler(0, 0, math.radians(orientations[matches[j][0]][0]) - math.pi) \r\n # transformed_pose_quaternion = Quaternion(x=transformed_pose[0], y=transformed_pose[1], z=transformed_pose[2], w=transformed_pose[3])\r\n # # associated_orientations.append(transformed_pose_quaternion)\r\n # associated_orientations.append(self.transform_orientation_to_world_reference(math.radians(orientations[matches[j][0]][0]), robot_orientation))\r\n # break\r\n\r\n # if len(bboxes) == 0:\r\n # return [], [], [], []\r\n # # ret_orientations = [orientations[match[]] for match in matches]\r\n\r\n # return bboxes, confidences, associated_orientations, poses\r\n # return segmentation_bboxes, segmentation_poses, segmentation_confidences\r\n\r\n def get_pose_data(self, result, depth_image, robot_trans_matrix, frame):\r\n pose_bboxes = []\r\n pose_poses = []\r\n pose_confidences = []\r\n for result in result:\r\n if result.keypoints != None and result.boxes != None:\r\n boxes = result.boxes\r\n keypoints = result.keypoints.xy.cpu().numpy().astype(int)\r\n if len(keypoints) == len(boxes):\r\n for i in range(len(keypoints)):\r\n person_bbox = boxes[i].xyxy.cpu().numpy().astype(int)[0] \r\n if len(keypoints[i]) > 0: \r\n x_avg = (keypoints[i][5, 0] + keypoints[i][6, 0]) / 2\r\n y_avg = (keypoints[i][5, 1] + keypoints[i][6, 1]) / 2\r\n if x_avg < 100 or x_avg > self.width - 100:\r\n continue\r\n neck_point = np.array([x_avg, y_avg]).astype(int)\r\n gender_pred, age_pred = self.get_pred_attributes(frame, person_bbox[0], person_bbox[1], person_bbox[2], person_bbox[3])\r\n person_pose = self.get_neck_distance(neck_point, depth_image, robot_trans_matrix)\r\n pose_poses.append(person_pose)\r\n pose_bboxes.append(person_bbox)\r\n pose_confidences.append(boxes[i].conf.cpu().numpy()[0]) \r\n return pose_bboxes, pose_confidences, pose_poses\r\n\r\n\r\n def get_neck_distance(self, neck_point, depth_image, robot_trans_matrix):\r\n # clamp the pixel inside the image: x against the width, y against the height\r\n neck_point[0] = neck_point[0] - 1 if neck_point[0] >= self.width else neck_point[0]\r\n neck_point[1] = neck_point[1] - 1 if neck_point[1] >= self.height else neck_point[1]\r\n if not np.isinf(depth_image[neck_point[1], neck_point[0]]):\r\n neck_point_3d = self.depth_point_to_xyz(neck_point, depth_image[neck_point[1], neck_point[0]])\r\n world_neck_point_3d = self.transform_pose_to_world_reference(neck_point_3d, robot_trans_matrix)\r\n return world_neck_point_3d\r\n else:\r\n return [np.inf, np.inf]\r\n\r\n def get_segmentator_data(self, result, depth_image, robot_trans_matrix):\r\n segmentation_bboxes = []\r\n segmentation_poses = []\r\n segmentation_confidences = []\r\n for result in result:\r\n if result.masks != None and result.boxes != None:\r\n masks = result.masks.xy\r\n boxes = result.boxes\r\n if len(masks) == len(boxes):\r\n for i in range(len(boxes)):\r\n person_bbox = boxes[i].xyxy.cpu().numpy().astype(int)[0]\r\n segmentation_bboxes.append(person_bbox)\r\n segmentation_confidences.append(boxes[i].conf.cpu().numpy()[0])\r\n person_pose = self.get_mask_distance(masks[i], depth_image, robot_trans_matrix)\r\n segmentation_poses.append(person_pose)\r\n return segmentation_bboxes, segmentation_confidences, segmentation_poses\r\n\r\n def get_mask_distance(self, mask, depth_image, robot_trans_matrix):\r\n valid_points = []\r\n for point in mask:\r\n x, y = int(point[0]), int(point[1])\r\n if not np.isinf(depth_image[y, x]):\r\n valid_points.append(self.depth_point_to_xyz(point, depth_image[y, x]))\r\n\r\n if valid_points:\r\n mean_point = np.mean(valid_points, axis=0)\r\n world_pose = self.transform_pose_to_world_reference(mean_point, robot_trans_matrix)\r\n return world_pose\r\n 
else:\n return [np.inf, np.inf]\n\n def associate_orientation_with_segmentation(self, seg_bboxes, ori_bboxes):\n dists = self.iou_distance(seg_bboxes, ori_bboxes)\n matches, unmatched_a, unmatched_b = self.linear_assignment(dists, 0.9)\n # print(\"NO MATCHES A\", unmatched_a)\n # print(\"NO MATCHES b\", unmatched_b)\n # print(\"MATCHES\", matches)\n return matches\n\n def linear_assignment(self, cost_matrix, thresh):\n if cost_matrix.size == 0:\n return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))\n matches, unmatched_a, unmatched_b = [], [], []\n cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)\n for ix, mx in enumerate(x):\n if mx >= 0:\n matches.append([ix, mx])\n unmatched_a = np.where(x < 0)[0]\n unmatched_b = np.where(y < 0)[0]\n matches = np.asarray(matches)\n return matches, unmatched_a, unmatched_b\n\n def iou_distance(self, atracks, btracks):\n if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):\n atlbrs = atracks\n btlbrs = btracks\n else:\n atlbrs = [track.tlbr for track in atracks]\n btlbrs = [track.tlbr for track in btracks]\n _ious = self.ious(atlbrs, btlbrs)\n cost_matrix = 1 - _ious\n\n return cost_matrix\n\n def ious(self, atlbrs, btlbrs):\n ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=float)\n if ious.size == 0:\n return ious\n\n ious = bbox_ious(\n np.ascontiguousarray(atlbrs, dtype=float),\n np.ascontiguousarray(btlbrs, dtype=float)\n )\n\n return ious\n\n # def get_people_pose(self, people_bboxes, depth_image):\n # radius = 5\n # color = (0, 0, 255) # Color en formato BGR (azul)\n # thickness = -1 # Relleno del círculo\n # people_poses = []\n # for person_bbox in people_bboxes:\n # cv2.rectangle(self.color_image, (int(person_bbox[0]), int(person_bbox[1])), (int(person_bbox[2]), int(person_bbox[3])), (255, 0, 0), 2)\n # x_range = int(person_bbox[0] + (person_bbox[2] - person_bbox[0]) / 2)\n # y_range = int(person_bbox[1] + (person_bbox[3] - person_bbox[1]) / 5)\n # # image_section = depth_image[int(person_bbox[3] / 5):int(person_bbox[3] / 4), x_range]\n # # print(image_section)\n # # # print(image_section)\n # # if image_section.size > 0: \n # # min_value = np.unravel_index(np.argmin(image_section), image_section.shape) \n # # else: \n # # continue\n\n # if math.isinf(depth_image[y_range][x_range]):\n # image_section = depth_image[y_range, int(person_bbox[0]):int(person_bbox[2])]\n # print(\"SECTION X\", int(person_bbox[2] / 4), int(person_bbox[2] * 3 / 4))\n # # print(image_section)\n # if image_section.size > 0: \n # print(image_section.shape)\n # min_value = np.unravel_index(np.argmin(image_section), image_section.shape) \n # if not math.isinf(depth_image[y_range][min_value[0]]): \n # print(\"Min value:\", min_value)\n # from_robot_pose = self.depth_point_to_xyz([min_value[0] + person_bbox[0], y_range], depth_image[y_range][min_value[0] + person_bbox[0]])\n # cv2.circle(self.color_image, (min_value[0] + person_bbox[0], y_range), radius, color, thickness)\n # else:\n # print(\"PROJECTED POINT:\", x_range, person_bbox[3])\n # cv2.circle(self.color_image, (x_range, person_bbox[3]), radius, color, thickness)\n # from_robot_pose = self.calculate_depth_with_projection([x_range, person_bbox[3]])\n # else: \n # print(\"PROJECTED POINT:\", x_range, person_bbox[3])\n # cv2.circle(self.color_image, (x_range, person_bbox[3]), radius, color, thickness)\n # from_robot_pose = self.calculate_depth_with_projection([x_range, 
person_bbox[3]])\n # else:\n # print(\"DEPTH POINT:\", x_range, y_range)\n # cv2.circle(self.color_image, (x_range, y_range), radius, color, thickness)\n # from_robot_pose = self.depth_point_to_xyz([x_range, y_range], depth_image[y_range][x_range])\n # world_person_pose = self.transform_pose_to_world_reference(from_robot_pose)\n # people_poses.append(world_person_pose)\n # return people_poses\n \n def get_yolo_objects(self, event):\n if self.new_data:\n depth_image = self.depth_image\n color_image = self.color_image\n robot_trans_matrix = self.robot_world_transform_matrix\n robot_orientation = self.robot_orientation\n self.get_people_data(color_image, depth_image, robot_trans_matrix, robot_orientation)\n # bboxes, scores, orientations, poses = self.get_people_data(color_image, depth_image, robot_trans_matrix, robot_orientation)\n # self.create_interface_data(bboxes, orientations, poses, scores)\n # self.new_data = False\n\n################# DATA STRUCTURATION #################\n\n def create_interface_data(self, boxes, orientations, centers, scores):\n objects = ObjectsMSG()\n objects.header.stamp = rospy.Time.now()\n print(\"EEE\")\n if len(boxes) == len(orientations) == len(centers) == len(scores):\n for index in range(len(boxes)):\n act_object = Object()\n act_object.type = 0\n act_object.left = boxes[index][0]\n act_object.top = boxes[index][1]\n act_object.right = boxes[index][2]\n act_object.bot = boxes[index][3]\n act_object.score = scores[index]\n # bbx_center_depth = [int((act_object.left + (act_object.right - act_object.left)/2)), int((act_object.top + (act_object.bot - act_object.top)/2))]\n act_object.pose = Pose()\n act_object.pose.position.x = centers[index][0] \n act_object.pose.position.y = centers[index][1]\n\n act_object.pose.orientation = orientations[index]\n act_object.image = self.get_bbox_image_data(self.color_image, [act_object.left, act_object.top, act_object.right, act_object.bot])\n \n objects.objectsmsg.append(act_object)\n print(len(objects.objectsmsg))\n self.objects_publisher.publish(objects)\n\n def get_bbox_image_data(self, image, element_box):\n cropped_image = image[int(element_box[1]):int(element_box[3]), int(element_box[0]):int(element_box[2])]\n y, x, _ = cropped_image.shape\n return msg_Image(data=cropped_image.tobytes(), height=y, width=x)\n\n################# TO WORLD TRANSFORMATIONS #################\n\n def transform_pose_to_world_reference(self, person_pose, robot_trans_matrix):\n # print(person_pose)\n # person_world_position = np.dot(self.camera_pose_respect_robot, np.dot(robot_trans_matrix, np.array([person_pose[0], -person_pose[1], 0, 1])))\n person_world_position = np.dot(robot_trans_matrix, np.array([person_pose[0], -person_pose[1], 0, 1]))\n\n return [round(person_world_position[0], 3), round(person_world_position[1], 3)]\n \n def transform_orientation_to_world_reference(self, orientation, robot_orientation):\n theta_world = robot_orientation + orientation\n transformed_pose = tf.transformations.quaternion_from_euler(0, 0, -((math.pi)-np.arctan2(np.sin(theta_world), np.cos(theta_world)))) \n return Quaternion(x=transformed_pose[0], y=transformed_pose[1], z=transformed_pose[2], w=transformed_pose[3])\n\n################# IMAGE POINTS TO DEPTH #################\n\n def depth_point_to_xyz(self, pixel, depth):\n # angle_y = ((math.pi - 1.01)/2) + (pixel[1]*1.01/480)\n # angle_z = ((2*math.pi) - 0.785/2) + (pixel[0]*0.785/640)\n angle_y = ((math.pi - 0.785)/2) + (pixel[1]*0.785/480)\n angle_z = ((2*math.pi) - 1.01/2) + (pixel[0]*1.01/640)\n 
y_distance = depth / math.tan(angle_y)\n z_distance = depth * math.tan(angle_z)\n return depth[0], z_distance[0], y_distance[0]\n \n def calculate_depth_with_projection(self, projected_point):\n world_y = 579.65506 * 1.2 / (projected_point[1] - 243.0783)\n world_x = world_y * (projected_point[0] - 317.47191) / 577.55158\n return [world_y, world_x]\n \n################# PERSON ATTRIBUTES #################\n\n\n\n def get_pred_attributes(self, frame, x1, y1, x2, y2):\n img = frame[y1:y2, x1:x2]\n img = Image.fromarray(img)\n img = self.valid_tsfm(img)\n valid_logits = self.age_classification_model(img.unsqueeze(0))\n valid_probs = torch.sigmoid(valid_logits)\n \n age_pred = self.age_range[torch.argmax(valid_probs[0][0:-1])]\n gender_pred = \"M\" if valid_probs[0][-1] > 0.5 else \"F\"\n\n return gender_pred, age_pred\n\n def load_age_predictor_state_dict(self, model):\n\n PATH_TO_AGE_GENDER_PREDICTOR_CHECKPOINT = '/home/robolab/software/BOSCH-Age-and-Gender-Prediction/exp_result/PETA/PETA/img_model/ckpt_max.pth'\n\n loaded = torch.load(PATH_TO_AGE_GENDER_PREDICTOR_CHECKPOINT, map_location=torch.device(\"cuda:0\"))\n\n if not torch.cuda.is_available():\n # remove `module.`\n new_state_dict = OrderedDict()\n for k, v in loaded['state_dicts'].items():\n name = k[7:] \n new_state_dict[name] = v\n\n # load parameters\n model.load_state_dict(new_state_dict, strict=False)\n else: \n model.load_state_dict(loaded['state_dicts'], strict=False)\n \n print(\"Load successful\")\n model = model.eval()\n\n\n\n\ndef handler(signal_received, frame):\n # Handle any cleanup here\n print('SIGINT or CTRL-C detected. Exiting gracefully')\n gc.collect()\n torch.cuda.empty_cache()\n exit(0)\n\n################# MAIN #################\n\nif __name__ == '__main__':\n rospy.init_node(\"yolov8\")\n rospy.loginfo(\"yolov8 node has been started\")\n\n signal(SIGINT, handler)\n\n yolo = yolov8()\n # rospy.wait_for_service('bytetrack_srv')\n\n rgb_subscriber = message_filters.Subscriber(\"/xtion/rgb/image_raw\", msg_Image)\n depth_subscriber = message_filters.Subscriber(\"/xtion/depth/image_raw\", msg_Image)\n odom_subscriber = message_filters.Subscriber(\"/camera_odom\", Odometry)\n \n ts = message_filters.TimeSynchronizer([rgb_subscriber, depth_subscriber, odom_subscriber], 5)\n ts.registerCallback(yolo.store_data)\n rospy.Timer(rospy.Duration(0.033), yolo.get_yolo_objects)\n rospy.spin()\n\n # rospy.logwarn(\"Warning test message\")\n # rospy.logerr(\"Error test message\")\n # rospy.loginfo(\"End of program\")\n","repo_name":"GeraGrind96/ROS_people_tracking","sub_path":"yolov8/scripts/complete_pose.py","file_name":"complete_pose.py","file_ext":"py","file_size_in_byte":23431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
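The associate_orientation_with_segmentation / linear_assignment pair in the record above reduces box matching to a linear assignment over a 1 - IoU cost matrix. Here is a self-contained toy sketch of that idea (the box values are invented; only lap.lapjv is assumed, with the same extend_cost / cost_limit arguments the node uses):

import numpy as np
import lap

def iou(a, b):
    # Intersection-over-union of two [x1, y1, x2, y2] boxes.
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / float(area_a + area_b - inter)

# Hypothetical detections from two models looking at the same frame.
seg_boxes = [[0, 0, 10, 10], [20, 20, 30, 30]]
ori_boxes = [[21, 19, 31, 29], [1, 1, 9, 11]]

cost = 1 - np.array([[iou(a, b) for b in ori_boxes] for a in seg_boxes])
_, x, _ = lap.lapjv(cost, extend_cost=True, cost_limit=0.9)
matches = [[i, int(j)] for i, j in enumerate(x) if j >= 0]
print(matches)  # expected: [[0, 1], [1, 0]]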
+{"seq_id":"70228591922","text":"# Python Version: 3.x\n\"\"\"\n.. py:data:: services\n\n :type: :py:class:`List` [ :py:class:`Type` [ :py:class:`onlinejudge.type.Service` ] ]\n\n contains classes to use for :py:func:`service_from_url`\n\n.. py:data:: problems\n\n :type: :py:class:`List` [ :py:class:`Type` [ :py:class:`onlinejudge.type.Problem` ] ]\n\n contains classes to use for :py:func:`problem_from_url`\n\n.. py:data:: submissions\n\n :type: :py:class:`List` [ :py:class:`Type` [ :py:class:`onlinejudge.type.Submission` ] ]\n\n contains classes to use for :py:func:`submission_from_url`\n\"\"\"\n\nfrom typing import List, Optional, Type\n\nfrom onlinejudge.type import Contest, Problem, Service, Submission\n\nsubmissions = [] # type: List[Type['Submission']]\n\n\ndef submission_from_url(url: str) -> Optional[Submission]:\n for cls in submissions:\n submission = cls.from_url(url)\n if submission is not None:\n return submission\n return None\n\n\nproblems = [] # type: List[Type['Problem']]\n\n\ndef problem_from_url(url: str) -> Optional[Problem]:\n \"\"\"\n >>> onlinejudge.dispatch.problem_from_url(\"https://atcoder.jp/contests/abc077/tasks/arc084_b\")\n <onlinejudge.service.atcoder.AtCoderProblem object at 0x...>\n\n >>> onlinejudge.dispatch.problem_from_url(\"https://codeforces.com/contest/1012/problem/D\")\n <onlinejudge.service.codeforces.CodeforcesProblem object at 0x...>\n \"\"\"\n\n for cls in problems:\n problem = cls.from_url(url)\n if problem is not None:\n return problem\n return None\n\n\ncontests = [] # type: List[Type['Contest']]\n\n\ndef contest_from_url(url: str) -> Optional[Contest]:\n for cls in contests:\n contest = cls.from_url(url)\n if contest is not None:\n return contest\n return None\n\n\nservices = [] # type: List[Type['Service']]\n\n\ndef service_from_url(url: str) -> Optional[Service]:\n for cls in services:\n service = cls.from_url(url)\n if service is not None:\n return service\n submission = submission_from_url(url)\n if submission is not None:\n return submission.get_service()\n problem = problem_from_url(url)\n if problem is not None:\n return problem.get_service()\n return None\n","repo_name":"online-judge-tools/api-client","sub_path":"onlinejudge/dispatch.py","file_name":"dispatch.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"75"}
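The registry pattern in onlinejudge/dispatch.py above is simple enough to demonstrate standalone. A hedged sketch with an invented DummyProblem class (not part of the library) showing how from_url resolution walks the problems list:

from typing import Optional

class DummyProblem:
    # Invented stand-in for a real Problem subclass such as AtCoderProblem.
    URL = "https://example.com/problem/1"

    @classmethod
    def from_url(cls, url: str) -> Optional["DummyProblem"]:
        return cls() if url == cls.URL else None

problems = []  # stands in for onlinejudge.dispatch.problems
problems.append(DummyProblem)

def problem_from_url(url):
    # Same loop as the library: first class that recognizes the URL wins.
    for klass in problems:
        problem = klass.from_url(url)
        if problem is not None:
            return problem
    return None

assert isinstance(problem_from_url(DummyProblem.URL), DummyProblem)
assert problem_from_url("https://example.com/other") is None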
+{"seq_id":"25042809881","text":"def maximum_index(l):\n if not isinstance(l, list) or l == []:\n return None\n best = l[0]\n index = 0\n for pos, value in enumerate(l):\n if value > best:\n best = value\n index = pos\n return index\n\nprint(maximum_index(\"papa\"))","repo_name":"thetheos/BAC1INFO1","sub_path":"Revision/mission4/max_index.py","file_name":"max_index.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
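For comparison with maximum_index above, the same behavior falls out of the standard library in one line: max over the index range, keyed by the list's __getitem__, returns the index of the first maximum. A short sketch (mine, not the course solution):

def maximum_index(l):
    if not isinstance(l, list) or not l:
        return None
    # max over indices, compared by the values they point at
    return max(range(len(l)), key=l.__getitem__)

assert maximum_index([3, 1, 7, 7, 2]) == 2
assert maximum_index([-5, -2, -9]) == 1  # works for all-negative lists too
assert maximum_index("papa") is None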
+{"seq_id":"70049003442","text":"import math\nimport sys\ninput = sys.stdin.readline\n\n\ndef find_max(l, r):\n target = -9999999999\n while True:\n if l > r:\n break\n if l % 2:\n target = max(target, tree[l])\n l += 1\n if r % 2 == 0:\n target = max(tree[r], target)\n r -= 1\n r //= 2\n l //= 2\n return target\n\n\ndef swap(idx):\n if idx == 1:\n return\n else:\n if idx % 2:\n tree[idx // 2] = max(tree[idx-1], tree[idx])\n else:\n tree[idx // 2] = max(tree[idx], tree[idx+1])\n swap(idx//2)\n\n\nn, m, q = map(int, input().split())\n\narr = [*map(int, input().split())]\nh = math.ceil(math.log2(n)) # height of the segment tree\n\nleft = list(sorted(map(int, input().split())))\nright = list(sorted(map(int, input().split())))\n\ntree = [-9999999999] * 2 ** (h+1)\nfor i in range(n):\n node = 2 ** h + i\n temp = arr[i]\n while node != 0:\n tree[node] = max(tree[node], temp)\n node //= 2\nfor _ in range(q):\n a, b = map(int, input().split())\n tree[2 ** h + a - 1], tree[2 ** h + b - 1] = tree[2 ** h + b - 1], tree[2 ** h + a - 1]\n swap(2 ** h + a - 1)\n swap(2 ** h + b - 1)\n dic = {}\n res = 0\n for i in range(m):\n if left[i] <= right[i]:\n if dic.get((left[i], right[i])) is None:\n dic[(left[i], right[i])] = find_max(2**h + left[i]-1, 2 ** h + right[i]-1)\n res = max(res, dic[(left[i], right[i])])\n else:\n res = 10 ** 9\n print(res)\n","repo_name":"SINHOLEE/Algorithm","sub_path":"python/beckjun/17082.py","file_name":"17082.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
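The submission above builds an iterative max segment tree: leaves sit at 2**h + i, swap bubbles a changed leaf's maximum up to the root, and find_max walks l and r toward the root. A compact standalone illustration of the same structure (independent of the submission, with invented sample data):

def build(arr):
    size = 1
    while size < len(arr):
        size *= 2
    tree = [float("-inf")] * (2 * size)
    for i, v in enumerate(arr):
        tree[size + i] = v          # leaves
    for i in range(size - 1, 0, -1):
        tree[i] = max(tree[2 * i], tree[2 * i + 1])  # internal nodes
    return tree, size

def range_max(tree, size, l, r):    # inclusive indices into arr
    l += size
    r += size
    best = float("-inf")
    while l <= r:
        if l % 2 == 1:              # l is a right child: take it, move right
            best = max(best, tree[l]); l += 1
        if r % 2 == 0:              # r is a left child: take it, move left
            best = max(best, tree[r]); r -= 1
        l //= 2
        r //= 2
    return best

tree, size = build([5, 2, 8, 1, 9, 3])
print(range_max(tree, size, 1, 4))  # 9
print(range_max(tree, size, 0, 2))  # 8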
+{"seq_id":"28635840036","text":"import itertools\nimport sys\n\nT = int(next(sys.stdin))\n\nfor t, N in enumerate(itertools.islice((int(line) for line in sys.stdin), T), 1):\n if N == 0:\n print('Case #{:d}: INSOMNIA'.format(t))\n else:\n digits = set()\n i = 0\n while len(digits) < 10:\n i += 1\n digits.update(set(str(i * N)))\n print('Case #{:d}: {:d}'.format(t, i * N))\n","repo_name":"DaHuO/Supergraph","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_jbnicolai_main.py","file_name":"16_0_1_jbnicolai_main.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
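A quick sanity check (mine) of the digit-accumulation loop in the solution above, using N = 1692: its multiples 1692, 3384, 5076 together cover all ten digits, so the loop stops at 5076.

N = 1692
digits = set()
i = 0
while len(digits) < 10:
    i += 1
    digits.update(str(i * N))   # 1692 -> {1,6,9,2}; 3384 adds {3,8,4}; 5076 adds {5,0,7}
print(i * N)                    # 5076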
+{"seq_id":"32591380281","text":"from django import forms\nfrom .models import product\n\n\nclass prodForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = product\n\t\tfields = [\n\t\t\t'title',\n\t\t\t'description',\n\t\t\t'price'\n\t\t]\n\nclass RawProdForm(forms.Form):\n\ttitle = forms.CharField(widget=forms.TextInput(attrs = {\n\t\t\t\"placeholder\" : \"come on :)\"\n\t\t}))\n\tdescription = forms.CharField(required=False, widget=forms.Textarea(attrs = {\n\t\t\t\"class\" : \"newOne\",\n\t\t\t\"rows\" : 5,\n\t\t\t\"cols\" : 50,\n\t\t\t\"placeholder\" : \"just do it\"\n\t\t\t})\n\t\t)\n\tprice = forms.DecimalField(initial=0.00)","repo_name":"nikkeo/Projects-on-python","sub_path":"firstTryDjango/src/products/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"16746088614","text":"import contextlib\nimport fnmatch\nimport hashlib\nimport logging\nimport os\nfrom pathlib import Path\nimport re\nimport shutil\nimport stat\nimport tempfile\nimport time\nfrom typing import Callable, Optional, Sequence, Union, cast\n\nfrom py._path.local import LocalPath\n\n\nLOG = logging.getLogger(\"pytest-wdl\")\nLOG.setLevel(os.environ.get(\"LOGLEVEL\", \"WARNING\").upper())\n\nENV_PATH = \"PATH\"\nENV_CLASSPATH = \"CLASSPATH\"\nDEFAULT_CLASSPATH = \".\"\n\nUNSAFE_RE = re.compile(r\"[^\\w.-]\")\n\n\ndef safe_string(s: str, replacement: str = \"_\") -> str:\n \"\"\"\n Makes a string safe by replacing non-word characters.\n\n Args:\n s: The string to make safe\n replacement: The replacement string\n\n Returns:\n The safe string\n \"\"\"\n return UNSAFE_RE.sub(replacement, s)\n\n\n# def deprecated(f: Callable):\n# \"\"\"\n# Decorator for deprecated functions/methods. Deprecated functionality will be\n# removed before each major release.\n# \"\"\"\n# def decorator(*args, **kwargs):\n# LOG.warning(f\"Function/method {f.__name__} is deprecated and will be removed\")\n# f(*args, **kwargs)\n# return decorator\n\n\n@contextlib.contextmanager\ndef chdir(todir: Path):\n \"\"\"\n Context manager that temporarily changes directories.\n\n Args:\n todir: The directory to change to.\n \"\"\"\n curdir = Path.cwd()\n try:\n os.chdir(todir)\n yield todir\n finally:\n os.chdir(curdir)\n\n\n@contextlib.contextmanager\ndef tempdir(\n change_dir: bool = False,\n tmproot: Optional[Path] = None,\n cleanup: Optional[bool] = True,\n) -> Path:\n \"\"\"\n Context manager that creates a temporary directory, yields it, and then\n deletes it after return from the yield.\n\n Args:\n change_dir: Whether to temporarily change to the temp dir.\n tmproot: Root directory in which to create temporary directories.\n cleanup: Whether to delete the temporary directory before exiting the context.\n \"\"\"\n temp = ensure_path(tempfile.mkdtemp(dir=tmproot))\n try:\n if change_dir:\n with chdir(temp):\n yield temp\n else:\n yield temp\n finally:\n if cleanup:\n shutil.rmtree(temp)\n\n\n@contextlib.contextmanager\ndef context_dir(\n path: Optional[Path] = None,\n change_dir: bool = False,\n cleanup: Optional[bool] = None,\n) -> Path:\n \"\"\"\n Context manager that yields the given directory, creating it if necessary.\n If no directory is given, a temporary directory is created and cleaned up\n upon return from the yield.\n\n Args:\n path: The directory to use. If None, a temporary directory is created.\n change_dir: Whether to change to the directory.\n cleanup: Whether to delete the directory when exiting the context. 
If None,\n the directory is only deleted if a temporary directory is created.\n\n Yields:\n A directory path.\n \"\"\"\n if cleanup is None:\n cleanup = path is None\n\n if not path:\n path = Path(tempfile.mkdtemp())\n elif not path.exists():\n path.mkdir(parents=True)\n\n try:\n if change_dir:\n with chdir(path):\n yield path\n else:\n yield path\n finally:\n if cleanup and path.exists():\n shutil.rmtree(path, ignore_errors=True)\n\n\ndef ensure_path(\n path: Union[str, LocalPath, Path],\n search_paths: Optional[Sequence[Path]] = None,\n canonicalize: bool = True,\n exists: Optional[bool] = None,\n is_file: Optional[bool] = None,\n executable: Optional[bool] = None,\n create: bool = False,\n) -> Path:\n \"\"\"\n Converts a string path or :class:`py.path.local.LocalPath` to a\n :class:`pathlib.Path`.\n\n Args:\n path: The path to convert.\n search_paths: Directories to search for `path` if it is not already absolute.\n If `exists` is True, looks for the first search path that contains the file,\n otherwise just uses the first search path.\n canonicalize: Whether to return the canonicalized version of the path -\n expand home directory shortcut (~), make absolute, and resolve symlinks.\n exists: If True, raise an exception if the path does not exist; if False,\n raise an exception if the path does exist.\n is_file: If True, raise an exception if the path is not a file; if False,\n raise an exception if the path is not a directory.\n executable: If True and `is_file` is True and the file exists, raise an\n exception if it is not executable.\n create: Create the directory (or parent, if `is_file` = True) if\n it does not exist. Ignored if `exists` is True.\n\n Returns:\n A `pathlib.Path` object.\n \"\"\"\n if isinstance(path, Path):\n p = cast(Path, path)\n else:\n p = Path(str(path))\n\n p = Path(os.path.expandvars(p))\n\n if canonicalize:\n p = p.expanduser()\n\n if search_paths and not p.is_absolute():\n if exists:\n for search_path in search_paths:\n p_tmp = search_path / p\n if p_tmp.exists():\n p = p_tmp.absolute()\n break\n else:\n p = (search_paths[0] / p).absolute()\n\n p = p.resolve()\n\n if p.exists():\n if exists is False:\n raise FileExistsError(f\"Path {p} already exists\")\n if is_file is True:\n if p.is_dir():\n raise IsADirectoryError(f\"Path {p} is not a file\")\n elif executable and not is_executable(p):\n raise OSError(f\"File {p} is not executable\")\n elif is_file is False and not p.is_dir():\n raise NotADirectoryError(f\"Path {p} is not a directory\")\n elif exists is True:\n raise FileNotFoundError(f\"Path {p} does not exist\")\n elif create:\n if is_file:\n p.parent.mkdir(parents=True, exist_ok=True)\n else:\n p.mkdir(parents=True, exist_ok=True)\n\n return p\n\n\ndef resolve_file(\n filename: Union[str, Path], project_root: Path, assert_exists: bool = True\n) -> Optional[Path]:\n \"\"\"\n Finds `filename` under `project_root` or in the project path.\n\n Args:\n filename: The filename, relative path, or absolute path to resolve.\n project_root: The project root dir.\n assert_exists: Whether to raise an error if the file cannot be found.\n\n Returns:\n A `pathlib.Path` object, or None if the file cannot be found and\n `assert_exists` is False.\n\n Raises:\n FileNotFoundError if the file cannot be found and `assert_exists` is True.\n \"\"\"\n path = ensure_path(filename, canonicalize=False)\n is_abs = path.is_absolute()\n\n if is_abs and path.exists():\n return path\n\n if not is_abs:\n check_path = ensure_path(project_root / path)\n if check_path.exists():\n return 
check_path\n # Search in cwd\n check_path = find_project_path(path)\n if check_path and check_path.exists():\n return check_path\n # Search upward from project root\n check_path = find_project_path(path, start=project_root)\n if check_path and check_path.exists():\n return check_path\n\n if assert_exists:\n raise FileNotFoundError(f\"Could not resolve file: {filename}\")\n else:\n return None\n\n\ndef find_project_path(\n *filenames: Union[str, Path],\n start: Optional[Path] = None,\n return_parent: bool = False,\n assert_exists: bool = False,\n) -> Optional[Path]:\n \"\"\"\n Starting from `path` folder and moving upwards, search for any of `filenames` and\n return the first path containing any one of them.\n\n Args:\n *filenames: Filenames to search. Either a string filename, or a sequence of\n string path elements.\n start: Starting folder\n return_parent: Whether to return the containing folder or the discovered file.\n assert_exists: Whether to raise an exception if a file cannot be found.\n\n Returns:\n A `Path`, or `None` if no folder is found that contains any of `filenames`.\n If `return_parent` is `False` and more than one of the files is found one\n of the files is randomly selected for return.\n\n Raises:\n FileNotFoundError if the file cannot be found and `assert_exists` is True.\n \"\"\"\n path = start or Path.cwd()\n while path != path.parent:\n for filename in filenames:\n if isinstance(filename, str):\n found = list(path.glob(filename))\n found = found[0] if found else None\n else:\n found = path / filename\n if not found.exists():\n found = None\n if found:\n LOG.debug(\"Found %s in %s\", filename, path)\n if return_parent:\n return path\n else:\n return found\n else:\n path = path.parent\n\n if assert_exists:\n raise FileNotFoundError(\n f\"Could not find any of {','.join(str(f) for f in filenames)} \"\n f\"starting from {start}\"\n )\n\n return None\n\n\ndef find_executable_path(\n executable: str, search_path: Optional[Sequence[Path]] = None\n) -> Optional[Path]:\n \"\"\"Finds 'executable' in `search_path`.\n\n Args:\n executable: The name of the executable to find.\n search_path: The list of directories to search. 
If None, the system search\n path (defined by the $PATH environment variable) is used.\n\n Returns:\n Absolute path of the executable, or None if no matching executable was found.\n \"\"\"\n if search_path is None:\n if ENV_PATH in os.environ:\n search_path = [Path(p) for p in os.environ[ENV_PATH].split(os.pathsep)]\n else:\n return None\n for path in search_path:\n exe_path = path / executable\n if exe_path.exists() and is_executable(exe_path):\n return exe_path\n else:\n return None\n\n\ndef is_executable(path: Path) -> bool:\n \"\"\"\n Checks if a path is executable.\n\n Args:\n path: The path to check\n\n Returns:\n True if `path` exists and is executable by the user, otherwise False.\n \"\"\"\n return path.exists() and os.stat(path).st_mode & stat.S_IXUSR\n\n\ndef find_in_classpath(glob: str) -> Optional[Path]:\n \"\"\"\n Attempts to find a .jar file matching the specified glob pattern in the\n Java classpath.\n\n Args:\n glob: JAR filename pattern\n\n Returns:\n Path to the JAR file, or None if a matching file is not found.\n \"\"\"\n classpath = os.environ.get(ENV_CLASSPATH, DEFAULT_CLASSPATH)\n\n for path_str in classpath.split(os.pathsep):\n path = ensure_path(path_str)\n if path.exists():\n if path.is_dir():\n matches = list(path.glob(glob))\n if matches:\n if len(matches) > 1:\n LOG.warning(\n \"Found multiple jar files matching pattern %s: %s;\"\n \"returning the first one.\",\n glob,\n matches,\n )\n return matches[0]\n elif path.exists() and fnmatch.fnmatch(path.name, glob):\n return path\n\n\ndef env_map(d: dict) -> dict:\n \"\"\"\n Given a mapping of keys to value descriptors, creates a mapping of the keys to\n the described values.\n \"\"\"\n envmap = {}\n for name, value_descriptor in d.items():\n value = resolve_value_descriptor(value_descriptor)\n if value:\n envmap[name] = value\n return envmap\n\n\ndef resolve_value_descriptor(value_descriptor: Union[str, dict]) -> Optional[str]:\n \"\"\"\n Resolves the value of a value descriptor, which may be an environment variable\n name, or a map with keys `env` (the environment variable name) and `value` (the\n value to use if `env` is not specified or if the environment variable is unset).\n\n Args:\n value_descriptor: The descriptor to resolve.\n\n Returns:\n The resolved value, or None if the descriptor does not resolve to one.\n \"\"\"\n if isinstance(value_descriptor, str):\n return os.environ.get(value_descriptor)\n elif \"env\" in value_descriptor:\n return os.environ.get(value_descriptor[\"env\"], value_descriptor.get(\"value\"))\n else:\n return value_descriptor.get(\"value\")\n\n\nclass DigestsNotEqualError(AssertionError):\n pass\n\n\ndef compare_files_with_hash(file1: Path, file2: Path, hash_name: str = \"md5\"):\n file1_digest = hash_file(file1, hash_name)\n file2_digest = hash_file(file2, hash_name)\n if file1_digest != file2_digest:\n raise DigestsNotEqualError(\n f\"{hash_name} digests differ between expected identical files \"\n f\"{file1}, {file2}\"\n )\n\n\ndef hash_file(path: Path, hash_name: str = \"md5\") -> str:\n assert hash_name in hashlib.algorithms_guaranteed\n with open(path, \"rb\") as inp:\n hashobj = hashlib.new(hash_name)\n hashobj.update(inp.read())\n return hashobj.hexdigest()\n\n\ndef verify_digests(path: Path, digests: dict):\n for hash_name, expected_digest in digests.items():\n try:\n actual_digest = hash_file(path, hash_name)\n except AssertionError: # TODO: test this\n LOG.warning(\n \"Hash algorithm %s is not supported; cannot verify file %s\",\n hash_name,\n path,\n )\n continue\n if actual_digest != expected_digest:\n raise DigestsNotEqualError(\n f\"{hash_name} digest {actual_digest} of 
file \"\n f\"{path} does not match expected value {expected_digest}\"\n )\n\n\nclass PollingException(Exception):\n \"\"\"Base exception that stores the last result seen.\"\"\"\n def __init__(self, last=None):\n self.last = last\n\n\nclass TimeoutException(PollingException):\n \"\"\"Exception raised if polling function times out\"\"\"\n\n\nclass MaxCallException(PollingException):\n \"\"\"Exception raised if maximum number of iterations is exceeded\"\"\"\n\n\ndef poll(\n target: Callable,\n step: int = 1,\n args: Optional[Sequence] = None,\n kwargs: Optional[dict] = None,\n timeout: Optional[int] = None,\n max_tries: Optional[int] = None,\n check_success: Callable = bool,\n step_function: Optional[Callable[[int, int], int]] = None,\n ignore_exceptions: Sequence = (),\n):\n \"\"\"\n Poll by calling a target function until a certain condition is met. You must specify\n at least a target function to be called and the step -- base wait time between\n each function call.\n\n Vendored from the [polling](https://github.com/justiniso/polling) package.\n\n Args:\n target: The target callable\n step: Step defines the amount of time to wait (in seconds)\n args: Arguments to be passed to the target function\n kwargs: Keyword arguments to be passed to the target function\n timeout: The target function will be called until the time elapsed is greater\n than the maximum timeout (in seconds). NOTE that the actual execution\n time of the function *can* exceed the time specified in the timeout. For\n instance, if the target function takes 10 seconds to execute and the timeout\n is 21 seconds, the polling function will take a total of 30 seconds (two\n iterations of the target --20s which is less than the timeout--21s,\n and a final iteration)\n max_tries: Maximum number of times the target function will be called before\n failing\n check_success: A callback function that accepts the return value of the target\n function. It must return true if you want the polling function to stop\n and return this value. It must return false if you want to continue\n polling. You may also use this function to collect non-success values. The\n default is a callback that tests for truthiness (anything not False, 0,\n or empty collection).\n step_function: A callback function that accepts two arguments: current_step,\n num_tries; and returns the next step value. By default, this is constant,\n but you can also pass a function that will increase or decrease the step.\n As an example, you can increase the wait time between calling the target\n function by 10 seconds every iteration until the step is 100 seconds--at\n which point it should remain constant at 100 seconds\n\n >>> def my_step_function(current_step: int, num_tries: int) -> int:\n >>> return min(current_step + 10, 100)\n\n ignore_exceptions: You can specify a tuple of exceptions that should be caught\n and ignored on every iteration. If the target function raises one of\n these exceptions, it will be caught and the exception instance will be\n pushed to the queue of values collected during polling. Any other exceptions\n raised will be raised as normal.\n\n Returns:\n The first value from the target function that meets the conditions of the\n check_success callback. 
By default, this will be the first value that is not\n None, 0, False, '', or an empty collection.\n \"\"\"\n max_time = time.time() + timeout if timeout else None\n tries = 0\n last_item = None\n\n if args is None:\n args = ()\n\n if kwargs is None:\n kwargs = {}\n\n while True:\n if max_tries and tries >= max_tries:\n raise MaxCallException(last_item)\n\n try:\n val = target(*args, **kwargs)\n last_item = val\n except ignore_exceptions as e:\n last_item = e\n else:\n # Condition passes, this is the only \"successful\" exit from the\n # polling function\n if check_success(val):\n return val\n\n tries += 1\n\n # Check the time after to make sure the poll function is called at least once\n if max_time and time.time() >= max_time:\n raise TimeoutException(last_item)\n\n time.sleep(step)\n\n if step_function:\n step = step_function(step, tries)\n","repo_name":"EliLillyCo/pytest-wdl","sub_path":"pytest_wdl/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":18200,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"75"}
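The poll helper's step_function hook above is easiest to see with a concrete backoff. A usage sketch (the flaky check is invented; poll and MaxCallException are imported from the module's real path, pytest_wdl/utils.py, per the record metadata):

import random

from pytest_wdl.utils import MaxCallException, poll

def flaky_check():
    # Stands in for any operation that eventually succeeds.
    return random.random() > 0.7

def doubling_step(current_step: int, num_tries: int) -> int:
    # 1s -> 2s -> 4s -> 8s -> 10s (capped)
    return min(current_step * 2, 10)

try:
    result = poll(
        target=flaky_check,
        step=1,
        max_tries=8,
        check_success=bool,
        step_function=doubling_step,
    )
    print("succeeded:", result)
except MaxCallException:
    print("gave up after 8 tries")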
+{"seq_id":"20470517271","text":"\n## this was written by qingluan\n# just an init handler\n# and a template offered to coders\nimport json\nimport tornado\nimport tornado.web\nimport socks\nfrom tornado.websocket import WebSocketHandler\nfrom .libs import TornadoApi\nfrom .libs import TornadoArgs\n\nfrom mroylib import auth\nfrom mroylib.auth import Authentication\nfrom mroylib.config import Config\nimport logging\nimport os\n\ncon = Config(name='swordnode.ini')\ncon.section = 'user'\nauth.USER_DB_PATH = con['tel_user_db']\n\nlogging.basicConfig(level=logging.INFO)\n\nclass BaseHandler(tornado.web.RequestHandler):\n def prepare(self):\n self.db = self.settings['db']\n self.L = self.settings['L']\n self.tloop = tornado.ioloop.IOLoop.current()\n def get_current_user(self):\n return (self.get_cookie('user'),self.get_cookie('passwd'))\n def get_current_secure_user(self):\n return (self.get_cookie('user'),self.get_secure_cookie('passwd'))\n def set_current_seccure_user_cookie(self,user,passwd):\n self.set_cookie('user',user)\n self.set_secure_cookie(\"passwd\",passwd)\n\n def json_reply(self,data):\n self.write(json.dumps(data))\n\n\nclass SocketHandler(WebSocketHandler):\n \"\"\" Web socket \"\"\"\n clients = set()\n con = dict()\n \n @staticmethod\n def send_to_all(msg):\n for con in SocketHandler.clients:\n con.write_message(json.dumps(msg))\n \n @staticmethod\n def send_to_one(msg, client_id):\n SocketHandler.con[client_id].write_message(msg)\n\n def json_reply(self, msg):\n self.write_message(json.dumps(msg))\n\n def open(self):\n SocketHandler.clients.add(self)\n SocketHandler.con[id(self)] = self\n \n def on_close(self):\n SocketHandler.clients.remove(self)\n \n def on_message(self, msg):\n SocketHandler.send_to_all(msg)\n\n\n\nclass AuthHandler(BaseHandler):\n\n @tornado.web.asynchronous\n def post(self):\n # you should get some arguments from the following\n parser = TornadoArgs(self, tp='tornado')\n cmd = parser.get_parameter(\"cmd\")\n phone = parser.get_parameter(\"phone\")\n token = parser.get_parameter(\"token\")\n code = parser.get_parameter(\"code\")\n proxy = parser.get_parameter(\"proxy\")\n\n _auth = Authentication(self.settings['user_db_path'], loop=self.tloop)\n if cmd == 'regist':\n _auth.registe(phone, token)\n self.json_reply({'msg': 'regist ok'})\n self.finish()\n elif cmd == 'login':\n def _reply(x, client):\n \n self.json_reply({\"api\": x})\n self.finish()\n logging.info(f\"Logging in: {phone} {code}\" )\n _auth.login(phone, code, _reply)\n \n elif cmd == 'auth':\n \n _auth.sendcode(phone)\n self.json_reply({'msg':'please receive code!'})\n self.finish()\n else:\n self.json_reply({\"msg\":f'error cmd: {cmd}'})\n self.finish()\n \n\nclass IndexHandler(BaseHandler):\n \n def prepare(self):\n super(IndexHandler, self).prepare()\n self.template = \"template/index.html\"\n\n def get(self):\n # L is a log function, which includes ok , info , err , fail, wrn\n self.L.ok('got')\n return self.render(self.template, post_page=\"/\")\n\n \n \n\n @tornado.web.asynchronous\n def post(self):\n # you should get some arguments from the following\n parser = TornadoArgs(self, tp='tornado')\n proxy = parser.get_parameter(\"proxy\")\n\n api = TornadoApi(name=parser.module, loop=self.tloop, callback=parser.after_dealwith)\n logging.error(f\"Permission : {api.Permission}\")\n key = parser.get_parameter(\"Api-key\", l='head')\n if api.Permission == \"auth\":\n \n if not key:\n self.json_reply({'error': 'No auth key!'})\n self.finish()\n else:\n logging.info(f\"load db: {self.settings['user_db_path']} \")\n _auth 
= Authentication(self.settings['user_db_path'], proxy=proxy, loop=self.tloop)\n if _auth.if_auth(key.strip()):\n res = api.run(*parser.args, **parser.kwargs)\n if res:\n self.json_reply({'msg': res})\n self.finish()\n else:\n self.json_reply({'error': 'No auth!'})\n self.finish()\n else:\n res = api.run(*parser.args, **parser.kwargs)\n if res:\n self.json_reply({'msg': res})\n self.finish()\n\n ","repo_name":"Qingluan/swordnode","sub_path":"swordserver/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"16904655969","text":"from django.contrib import admin\nfrom django.contrib.admin.models import DELETION, LogEntry\nfrom django.db.models import Exists, OuterRef\nfrom django.urls import reverse\nfrom django.utils.html import escape\nfrom django.utils.safestring import mark_safe\n\nfrom app.common.enums import GroupType\nfrom app.content import models\nfrom app.group.models.membership import Membership\n\nadmin.site.register(models.News)\nadmin.site.register(models.Category)\nadmin.site.register(models.PriorityPool)\nadmin.site.register(models.Cheatsheet)\nadmin.site.register(models.Page)\nadmin.site.register(models.ShortLink)\nadmin.site.register(models.Toddel)\nadmin.site.register(models.QRCode)\n\n\n@admin.register(models.Strike)\nclass StrikeAdmin(admin.ModelAdmin):\n list_display = (\n \"user\",\n \"event\",\n \"description\",\n \"strike_size\",\n )\n raw_id_fields = (\n \"user\",\n \"event\",\n \"creator\",\n )\n search_fields = (\n \"user__user_id\",\n \"event__title\",\n \"user__first_name\",\n \"user__last_name\",\n )\n\n\ndef admin_delete_registration(modeladmin, request, queryset):\n for registration in queryset:\n registration.admin_unregister()\n\n\n@admin.register(models.Registration)\nclass RegistrationAdmin(admin.ModelAdmin):\n list_display = (\"user\", \"event\", \"is_on_wait\", \"has_attended\")\n search_fields = (\n \"user__user_id\",\n \"event__title\",\n \"user__first_name\",\n \"user__last_name\",\n )\n readonly_fields = (\"created_at\", \"updated_at\")\n list_filter = (\n \"is_on_wait\",\n \"has_attended\",\n \"event\",\n \"user\",\n )\n # Enables checks bypassing from the 'Action' dropdown in Registration overview\n actions = [\n admin_delete_registration,\n ]\n\n\nclass SlackConnectedListFilter(admin.SimpleListFilter):\n \"\"\"Filters users checking if they have connected to their Slack-user\"\"\"\n\n title = \"har tilkoblet Slack-bruker\"\n parameter_name = \"slack_connected\"\n\n def lookups(self, *args, **kwargs):\n return (\n (\"true\", \"Ja\"),\n (\"false\", \"Nei\"),\n )\n\n def queryset(self, request, queryset):\n if self.value() == \"true\":\n return queryset.exclude(slack_user_id__exact=\"\")\n if self.value() == \"false\":\n return queryset.filter(slack_user_id__exact=\"\")\n\n\nclass AffiliatedStudyListFilter(admin.SimpleListFilter):\n \"\"\"Filters users checking if they're connected to a study\"\"\"\n\n title = \"har studie-medlemskap\"\n parameter_name = \"affiliated_study\"\n\n def lookups(self, *args, **kwargs):\n return (\n (\"true\", \"Ja\"),\n (\"false\", \"Nei\"),\n )\n\n def queryset(self, request, queryset):\n connected_query = Exists(\n Membership.objects.filter(\n user__user_id=OuterRef(\"pk\"), group__type=GroupType.STUDY\n )\n )\n if self.value() == \"true\":\n return queryset.filter(connected_query)\n if self.value() == \"false\":\n return queryset.filter(~connected_query)\n\n\nclass AffiliatedStudyyearListFilter(admin.SimpleListFilter):\n \"\"\"Filters users checking if they're connected to a studyyear\"\"\"\n\n title = \"har studieår-medlemskap\"\n parameter_name = \"affiliated_studyyear\"\n\n def lookups(self, *args, **kwargs):\n return (\n (\"true\", \"Ja\"),\n (\"false\", \"Nei\"),\n )\n\n def queryset(self, request, queryset):\n connected_query = Exists(\n Membership.objects.filter(\n user__user_id=OuterRef(\"pk\"), group__type=GroupType.STUDYYEAR\n )\n )\n if self.value() == \"true\":\n return queryset.filter(connected_query)\n if self.value() == \"false\":\n return 
queryset.filter(~connected_query)\n\n\n@admin.register(models.User)\nclass UserAdmin(admin.ModelAdmin):\n list_display = (\"user_id\", \"first_name\", \"last_name\")\n search_fields = (\"user_id\", \"first_name\", \"last_name\")\n\n list_filter = (\n \"gender\",\n \"public_event_registrations\",\n AffiliatedStudyListFilter,\n AffiliatedStudyyearListFilter,\n SlackConnectedListFilter,\n )\n\n\n@admin.register(models.Event)\nclass EventAdmin(admin.ModelAdmin):\n list_display = (\"title\", \"start_date\", \"location\", \"category\", \"organizer\")\n search_fields = (\n \"title\",\n \"description\",\n \"location\",\n )\n\n list_filter = (\n \"sign_up\",\n \"start_date\",\n \"category\",\n \"organizer\",\n )\n\n\nclass StrikesOverview(models.User):\n class Meta:\n verbose_name_plural = \"Strikes Overview\"\n proxy = True\n\n\n@admin.register(StrikesOverview)\nclass StrikesOverviewAdmin(UserAdmin):\n list_display = (\n \"user_id\",\n \"first_name\",\n \"last_name\",\n \"active_strikes\",\n )\n\n def active_strikes(self, obj):\n return obj.number_of_strikes\n\n def get_actions(self, request):\n \"\"\"Disallow bulk modifications/deletions of users through this panel.\"\"\"\n return []\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n active_strikes = models.Strike.objects.active()\n return qs.filter(strikes__in=active_strikes).distinct()\n\n def has_add_permission(self, request):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\n@admin.register(LogEntry)\nclass LogEntryAdmin(admin.ModelAdmin):\n actions = None\n\n date_hierarchy = \"action_time\"\n\n list_filter = [\"user\", \"content_type\", \"action_flag\"]\n\n search_fields = [\"object_repr\", \"change_message\"]\n\n list_display = [\n \"action_time\",\n \"user\",\n \"content_type\",\n \"object_link\",\n \"action_flag\",\n ]\n\n def has_add_permission(self, request):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n if \"admin/logentry\" in request.path:\n return False\n return True\n\n def has_view_permission(self, request, obj=None):\n return request.user.is_superuser\n\n def object_link(self, obj):\n if obj.action_flag == DELETION:\n link = escape(obj.object_repr)\n else:\n ct = obj.content_type\n link = '%s' % (\n reverse(\n \"admin:%s_%s_change\" % (ct.app_label, ct.model),\n args=[obj.object_id],\n ),\n escape(obj.object_repr),\n )\n return mark_safe(link)\n\n object_link.admin_order_field = \"object_repr\"\n object_link.short_description = \"object\"\n","repo_name":"TIHLDE/Lepton","sub_path":"app/content/admin/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"75"}
+{"seq_id":"24112852516","text":"\"\"\"\nSimple wrappers around request methods.\n\"\"\"\n\nfrom functools import update_wrapper\nimport logging\nimport os.path\nimport re\nimport sys\nimport tempfile\nimport typing as ty\n\nimport click\nimport requests\n\nimport git_pw\nfrom git_pw import config\n\nCONF = config.CONF\nLOG = logging.getLogger(__name__)\n\nFilters = ty.List[ty.Tuple[str, str]]\n\n\nclass HTTPTokenAuth(requests.auth.AuthBase):\n \"\"\"Attaches HTTP Token Authentication to the given Request object.\"\"\"\n\n def __init__(self, token: str):\n self.token = token\n\n def __call__(\n self,\n r: requests.PreparedRequest,\n ) -> requests.PreparedRequest:\n r.headers['Authorization'] = self._token_auth_str(self.token)\n return r\n\n @staticmethod\n def _token_auth_str(token: str) -> str:\n \"\"\"Return a Token auth string.\"\"\"\n return 'Token {}'.format(token.strip())\n\n\ndef _get_auth(optional: bool = False) -> ty.Optional[requests.auth.AuthBase]:\n if CONF.token:\n return HTTPTokenAuth(CONF.token)\n elif CONF.username and CONF.password:\n return requests.auth.HTTPBasicAuth(CONF.username, CONF.password)\n elif not optional:\n LOG.error('Authentication information missing')\n LOG.error(\n 'You must configure authentication via git-config or via '\n '--token or --username, --password'\n )\n sys.exit(1)\n return None\n\n\ndef _get_headers() -> ty.Dict[str, str]:\n return {\n 'User-Agent': 'git-pw ({})'.format(git_pw.__version__),\n }\n\n\ndef _get_server() -> str:\n if CONF.server:\n server = CONF.server.rstrip('/')\n\n if not re.match(r'.*/api/\\d\\.\\d$', server):\n LOG.warning('Server version missing')\n LOG.warning(\n 'You should provide the server version in the URL '\n 'configured via git-config or --server'\n )\n LOG.warning('This will be required in git-pw 2.0')\n\n if not re.match(r'.*/api(/\\d\\.\\d)?$', server):\n # NOTE(stephenfin): We've already handled this particular error\n # above so we don't warn twice. We also don't stick on a version\n # number since the user clearly wants the latest\n server += '/api'\n\n return server\n else:\n LOG.error('Server information missing')\n LOG.error(\n 'You must provide server information via git-config or via '\n '--server'\n )\n sys.exit(1)\n\n\ndef _get_project() -> str:\n if CONF.project and CONF.project.strip() == '*':\n return '' # just don't bother filtering on project\n elif CONF.project:\n return CONF.project.strip()\n else:\n LOG.error('Project information missing')\n LOG.error(\n 'You must provide project information via git-config or '\n 'via --project'\n )\n LOG.error('To list all projects, set project to \"*\"')\n sys.exit(1)\n\n\ndef _handle_error(\n operation: str,\n exc: requests.exceptions.RequestException,\n) -> None:\n if exc.response is not None and exc.response.content:\n # server errors should always be reported\n if exc.response.status_code in range(500, 512): # 5xx Server Error\n LOG.error(\n 'Server error. Please report this issue to '\n 'https://github.com/getpatchwork/patchwork'\n )\n raise\n\n # we make the assumption that all responses will be JSON encoded\n if exc.response.status_code == 404:\n LOG.error('Resource not found')\n else:\n LOG.error(exc.response.json())\n else:\n LOG.error(\n 'Failed to %s resource. Is your configuration '\n 'correct?' 
% operation\n )\n LOG.error(\"Use the '--debug' flag for more information\")\n\n if CONF.debug:\n raise\n else:\n sys.exit(1)\n\n\ndef _get(\n url: str,\n params: ty.Optional[Filters] = None,\n stream: bool = False,\n) -> requests.Response:\n \"\"\"Make GET request and handle errors.\"\"\"\n LOG.debug('GET %s', url)\n\n try:\n # TODO(stephenfin): We only use a subset of the types possible for\n # 'params' (namely a list of tuples) but it doesn't seem possible to\n # indicate this\n rsp = requests.get(\n url,\n auth=_get_auth(optional=True),\n headers=_get_headers(),\n stream=stream,\n params=params,\n ) # type: ignore\n rsp.raise_for_status()\n except requests.exceptions.RequestException as exc:\n _handle_error('fetch', exc)\n\n LOG.debug('Got response')\n\n return rsp\n\n\ndef _post(\n url: str,\n data: ty.List[ty.Tuple[str, ty.Any]],\n) -> requests.Response:\n \"\"\"Make POST request and handle errors.\"\"\"\n LOG.debug('POST %s, data=%r', url, data)\n\n try:\n rsp = requests.post(\n url, auth=_get_auth(), headers=_get_headers(), data=data\n )\n rsp.raise_for_status()\n except requests.exceptions.RequestException as exc:\n _handle_error('create', exc)\n\n LOG.debug('Got response')\n\n return rsp\n\n\ndef _patch(\n url: str,\n data: ty.List[ty.Tuple[str, ty.Any]],\n) -> requests.Response:\n \"\"\"Make PATCH request and handle errors.\"\"\"\n LOG.debug('PATCH %s, data=%r', url, data)\n\n try:\n rsp = requests.patch(\n url,\n auth=_get_auth(),\n headers=_get_headers(),\n data=data,\n )\n rsp.raise_for_status()\n except requests.exceptions.RequestException as exc:\n _handle_error('update', exc)\n\n LOG.debug('Got response')\n\n return rsp\n\n\ndef _delete(url: str) -> requests.Response:\n \"\"\"Make DELETE request and handle errors.\"\"\"\n LOG.debug('DELETE %s', url)\n\n try:\n rsp = requests.delete(url, auth=_get_auth(), headers=_get_headers())\n rsp.raise_for_status()\n except requests.exceptions.RequestException as exc:\n _handle_error('delete', exc)\n\n LOG.debug('Got response')\n\n return rsp\n\n\ndef version() -> ty.Tuple[int, int]:\n \"\"\"Get the version of the server from the URL, if present.\"\"\"\n server = _get_server()\n\n version = re.match(r'.*/(\\d)\\.(\\d)$', server)\n if version:\n return (int(version.group(1)), int(version.group(2)))\n\n # return the oldest version we support if no version provided\n return (1, 0)\n\n\ndef download(\n url: str,\n params: ty.Optional[Filters] = None,\n output: ty.Optional[ty.Optional[str]] = None,\n) -> ty.Optional[str]:\n \"\"\"Retrieve a specific API resource and save it to a file/stdout.\n\n The ``Content-Disposition`` header is assumed to be present and\n will be used for the output filename, if not writing to stdout.\n\n Arguments:\n url: The resource URL.\n params: Additional parameters.\n output: The output file. 
If output is a directory then\n the file name will be according to the patch subject and\n will be downloaded into the output directory.\n If None, a temporary file will be used.\n\n Returns:\n A path to an output file containing the content, else None if stdout\n used.\n \"\"\"\n rsp = _get(url, params, stream=True)\n\n # we don't catch anything here because we should break if these are missing\n header = re.search(\n 'filename=(.+)',\n rsp.headers.get('content-disposition') or '',\n )\n if not header:\n LOG.error('Filename was expected but was not provided in response')\n sys.exit(1)\n\n if output == '-':\n output_path = output\n output_file = sys.stdout.buffer\n else:\n if output:\n output_path = output\n if os.path.isdir(output):\n output_path = os.path.join(output, header.group(1))\n else:\n output_path = os.path.join(\n tempfile.mkdtemp(prefix='git-pw'),\n header.group(1),\n )\n LOG.debug('Saving to %s', output_path)\n output_file = open(output_path, 'wb')\n\n try:\n # we use iter_content because patches can be binary\n for block in rsp.iter_content(1024):\n output_file.write(block)\n finally:\n output_file.close()\n\n return output_path\n\n\ndef index(resource_type: str, params: ty.Optional[Filters] = None) -> dict:\n \"\"\"List API resources.\n\n GET /{resource}/\n\n All resources are JSON bodies, thus we can access them in a similar\n fashion.\n\n Arguments:\n resource_type: The resource endpoint name.\n params: Additional parameters, filters.\n\n Returns:\n A list of dictionaries, representing the summary view of each resource.\n \"\"\"\n # NOTE(stephenfin): All resources must have a trailing '/'\n url = '/'.join([_get_server(), resource_type, ''])\n\n # NOTE(stephenfin): Not all endpoints in the Patchwork API allow filtering\n # by project, but all the ones we care about here do.\n params = params or []\n params.append(('project', _get_project()))\n\n return _get(url, params).json()\n\n\ndef detail(\n resource_type: str,\n resource_id: ty.Union[str, int],\n params: ty.Optional[Filters] = None,\n) -> ty.Dict:\n \"\"\"Retrieve a specific API resource.\n\n GET /{resource}/{resourceID}/\n\n Arguments:\n resource_type: The resource endpoint name.\n resource_id: The ID for the specific resource.\n params: Additional parameters.\n\n Returns:\n A dictionary representing the detailed view of a given resource.\n \"\"\"\n # NOTE(stephenfin): All resources must have a trailing '/'\n url = '/'.join([_get_server(), resource_type, str(resource_id), ''])\n\n return _get(url, params, stream=False).json()\n\n\ndef create(\n resource_type: str,\n data: ty.List[ty.Tuple[str, ty.Any]],\n) -> dict:\n \"\"\"Create a new API resource.\n\n POST /{resource}/\n\n Arguments:\n resource_type: The resource endpoint name.\n params: Fields to update.\n\n Returns:\n A dictionary representing the detailed view of a given resource.\n \"\"\"\n # NOTE(stephenfin): All resources must have a trailing '/'\n url = '/'.join([_get_server(), resource_type, ''])\n\n return _post(url, data).json()\n\n\ndef delete(resource_type: str, resource_id: ty.Union[str, int]) -> None:\n \"\"\"Delete a specific API resource.\n\n DELETE /{resource}/{resourceID}/\n\n Arguments:\n resource_type: The resource endpoint name.\n resource_id: The ID for the specific resource.\n\n Returns:\n A dictionary representing the detailed view of a given resource.\n \"\"\"\n # NOTE(stephenfin): All resources must have a trailing '/'\n url = '/'.join([_get_server(), resource_type, str(resource_id), ''])\n\n _delete(url)\n\n\ndef update(\n resource_type: 
str,\n resource_id: ty.Union[str, int],\n data: ty.List[ty.Tuple[str, ty.Any]],\n) -> dict:\n \"\"\"Update a specific API resource.\n\n PATCH /{resource}/{resourceID}/\n\n Arguments:\n resource_type: The resource endpoint name.\n resource_id: The ID for the specific resource.\n data: Fields to update.\n\n Returns:\n A dictionary representing the detailed view of a given resource.\n \"\"\"\n # NOTE(stephenfin): All resources must have a trailing '/'\n url = '/'.join([_get_server(), resource_type, str(resource_id), ''])\n\n return _patch(url, data).json()\n\n\ndef validate_minimum_version(\n min_version: ty.Tuple[int, int],\n msg: str,\n) -> ty.Callable[[ty.Any], ty.Any]:\n def inner(f):\n @click.pass_context\n def new_func(ctx, *args, **kwargs):\n if version() < min_version:\n LOG.error(msg)\n sys.exit(1)\n\n return ctx.invoke(f, *args, **kwargs)\n\n return update_wrapper(new_func, f)\n\n return inner\n\n\ndef validate_multiple_filter_support(f: ty.Callable) -> ty.Callable:\n @click.pass_context\n def new_func(ctx, *args, **kwargs):\n if version() >= (1, 1):\n return ctx.invoke(f, *args, **kwargs)\n\n for param in ctx.command.params:\n if not param.multiple:\n continue\n\n if param.name in ('headers',):\n continue\n\n value = list(kwargs[param.name] or [])\n if value and len(value) > 1 and value != param.default:\n msg = (\n 'The `--%s` filter was specified multiple times. '\n 'Filtering by multiple %ss is not supported with API '\n 'version 1.0. If the server supports it, use version '\n '1.1 instead. Refer to https://tinyurl.com/2p8swbpn for '\n 'more information.'\n )\n\n LOG.warning(msg, param.name, param.name)\n\n return ctx.invoke(f, *args, **kwargs)\n\n return update_wrapper(new_func, f)\n\n\ndef retrieve_filter_ids(\n resource_type: str,\n filter_name: str,\n filter_value: str,\n) -> ty.List[ty.Tuple[str, str]]:\n \"\"\"Retrieve IDs for items passed through by filter.\n\n Some filters require client-side filtering, e.g. filtering patches by\n submitter names.\n\n Arguments:\n resource_type: The filter's resource endpoint name.\n filter_name: The name of the filter.\n filter_value: The value of the filter.\n\n Returns:\n A list of querystring key-value pairs to use in the actual request.\n \"\"\"\n if len(filter_value) < 3:\n # protect against really generic (and essentially meaningless) queries\n LOG.error('Filters must be at least 3 characters long')\n sys.exit(1)\n\n # NOTE(stephenfin): This purposefully ignores the possibility of a second\n # page because it's unlikely and likely unnecessary\n items = index(resource_type, [('q', filter_value)])\n if len(items) == 0:\n LOG.warning('No matching %s found: %s', filter_name, filter_value)\n elif len(items) > 1 and version() < (1, 1):\n # we don't support multiple filters in 1.0\n msg = (\n 'More than one match found for `--%s=%s`. '\n 'Filtering by multiple %ss is not supported with '\n 'API version 1.0. If the server supports it, use '\n 'version 1.1 instead. Refer to https://tinyurl.com/2p8swbpn '\n 'for more information.'\n )\n\n LOG.warning(msg, filter_name, filter_value, filter_name)\n\n return [(filter_name, item['id']) for item in items]\n","repo_name":"getpatchwork/git-pw","sub_path":"git_pw/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":14319,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"75"}
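A short usage sketch of the helpers in this module, assuming git-pw has already been configured via git-config and that the server returns the standard Patchwork fields; the ('state', 'new') filter is illustrative:

from git_pw import api

# List patches for the configured project.
patches = api.index('patches', [('state', 'new')])

if patches:
    # Fetch the detailed view of the first hit and download its mbox;
    # 'mbox' is the download URL Patchwork includes in the patch detail.
    patch = api.detail('patches', patches[0]['id'])
    mbox_path = api.download(patch['mbox'])
    print(patch['name'], '->', mbox_path)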
+{"seq_id":"17683505114","text":"import re\n\nfrom hw.utils import to_hex\nfrom src.hw.cgra_configuration import CgraConfiguration\n\n\nclass CgraAssembler:\n def __init__(self, cgra, asm_file, output_file=None):\n self.cgra = cgra\n self.cc = CgraConfiguration(cgra)\n self.asm_file = asm_file\n self.output_file = output_file\n self.alu_inst = {}\n self.routes_inst = {}\n self.const = []\n self.accumulator = []\n self.last_error = ''\n self.used_inputs = []\n self.used_outputs = []\n self.ostream_ignore = []\n self.ostream_ignore_loop = []\n\n def reset(self):\n self.alu_inst.clear()\n self.routes_inst.clear()\n self.const.clear()\n self.accumulator.clear()\n self.last_error = ''\n self.used_inputs.clear()\n self.used_outputs.clear()\n self.ostream_ignore.clear()\n self.ostream_ignore_loop.clear()\n\n def parse(self):\n f = open(self.asm_file)\n lines = f.read().split('\\n')\n f.close()\n i = 1\n self.last_error = ''\n for line in lines:\n line = line.split('//')[0]\n if line and line[0] != '#':\n line = re.sub(' +', ' ', line)\n tokens = line.split(' ')\n if tokens[0] == 'route':\n r, v = self.decode_route_inst(tokens)\n if r:\n self.routes_inst[i] = v\n else:\n self.last_error = 'line %d: %s' % (i, v)\n return\n elif tokens[0] == 'set':\n r, v = self.decode_set_inst(i, tokens)\n if not r:\n self.last_error = 'line %d: %s' % (i, v)\n else:\n r, v = self.decode_alu_inst(i, tokens)\n if r:\n self.alu_inst[i] = v\n else:\n self.last_error = 'line %d: %s' % (i, v)\n return\n i += 1\n\n def decode_set_inst(self, line, inst):\n try:\n val = max(int(inst[3]), 1)\n if inst[2] == '$ostream_ignore':\n val *= 3 # 3 é o pipeline atual da alu dos PEs.\n self.ostream_ignore.append((line, int(inst[1][1:]), val))\n elif inst[2] == '$ostream_loop':\n self.ostream_ignore_loop.append((line, int(inst[1][1:]), val))\n elif inst[2] == '$accumulator':\n self.accumulator.append((line, int(inst[1][1:]), val))\n else:\n return False, 'Invalid argument.'\n except Exception as e:\n return False, str(e)\n\n return True, ''\n\n def decode_alu_inst(self, line, inst):\n try:\n op = inst[0]\n pe = int(inst[1][1:])\n alu_src = []\n delays = []\n tok = inst[2:]\n is_istream = False\n for j in range(len(tok)):\n i = tok[j]\n if '#' in i:\n delays.append((j, int(i[1:])))\n else:\n if 'alu' in i or 'istream' in i or 'acc' in i:\n alu_src.append(i[1:])\n if 'istream' in i:\n is_istream = True\n elif '$' in i:\n alu_src.append(int(i[1:]))\n else:\n alu_src.append('const')\n self.const.append((line, pe, int(i)))\n\n for i in range(len(delays)):\n idx, v = delays[i]\n delays[i] = (alu_src.index(alu_src[idx]), v)\n\n except Exception as e:\n return False, str(e)\n if is_istream:\n self.used_inputs.append(pe)\n\n return True, [pe, op, alu_src, delays]\n\n def decode_route_inst(self, inst):\n try:\n pe = int(inst[1][1:])\n if 'alu' in inst[2][1:]:\n src = 'alu'\n else:\n src = int(inst[2][1:])\n if 'ostream' in inst[3][1:]:\n dst = 'ostream'\n else:\n dst = int(inst[3][1:])\n except Exception as e:\n return None, str(e)\n\n if dst == 'ostream':\n self.used_outputs.append(pe)\n return True, [pe, {dst: src}]\n\n def compile(self):\n self.reset()\n self.parse()\n machine_code = ''\n if self.last_error == '':\n for line, conf in self.alu_inst.items():\n r, v = self.cc.create_alu_conf(conf[0], conf[1], conf[2], conf[3])\n if r:\n for c in v:\n machine_code += c + '\\n'\n else:\n self.last_error = 'line %d: %s' % (line, v)\n break\n\n if 'acc' in conf[2]:\n r, v = self.cc.create_reset_conf(conf[0])\n if r:\n for c in v:\n machine_code += 
c + '\\n'\r\n else:\r\n self.last_error = 'line %d: %s' % (line, v)\r\n break\r\n\r\n if self.last_error == '':\r\n for line, i, const in self.const:\r\n r, v = self.cc.create_const_conf(i, const)\r\n if r:\r\n for c in v:\r\n machine_code += c + '\\n'\r\n else:\r\n self.last_error = 'line %d: %s' % (line, v)\r\n break\r\n\r\n if self.last_error == '':\r\n for line, i, acc in self.accumulator:\r\n r, v = self.cc.create_acc_reset_conf(i, acc)\r\n if r:\r\n for c in v:\r\n machine_code += c + '\\n'\r\n else:\r\n self.last_error = 'line %d: %s' % (line, v)\r\n break\r\n\r\n if self.last_error == '':\r\n routing = {}\r\n for line, c in self.routes_inst.items():\r\n if c[0] in routing.keys():\r\n routing[c[0]].update(c[1])\r\n else:\r\n routing[c[0]] = c[1]\r\n\r\n for line, co in self.routes_inst.items():\r\n r, v = self.cc.create_router_conf(co[0], routing[co[0]])\r\n if r:\r\n for c in v:\r\n machine_code += c + '\\n'\r\n else:\r\n self.last_error = 'line %d: %s' % (line, v)\r\n break\r\n\r\n if self.last_error:\r\n print('Compile error on %s' % self.last_error)\r\n return None\r\n\r\n if self.output_file:\r\n f = open(self.output_file, 'w')\r\n f.write(machine_code[:-1])\r\n f.close()\r\n print('Build succeeded, output file saved in %s' % self.output_file)\r\n\r\n return machine_code[:-1]\r\n","repo_name":"mfkiwl/hpcgra","sub_path":"src/hw/cgra_assembler.py","file_name":"cgra_assembler.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
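A hedged driver sketch for the assembler above; build_cgra() is a placeholder for however the CGRA model object compatible with CgraConfiguration is constructed elsewhere in the project:

# Hypothetical usage -- 'build_cgra' is not a real project API.
asm = CgraAssembler(build_cgra(), 'kernel.asm', output_file='kernel.bin')
machine_code = asm.compile()
if machine_code is None:
    print('Assembly failed:', asm.last_error)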
+{"seq_id":"4851155936","text":"from __future__ import print_function\n\nfrom OCC.Core.gp import gp_Pnt2d,gp_Pnt,gp_Circ,gp_Ax2\nfrom OCC.Core.Geom import Geom_Circle,Geom_BSplineCurve\nfrom OCC.Core.TColgp import TColgp_Array1OfPnt\nfrom OCC.Core.TColStd import TColStd_Array1OfReal,TColStd_Array1OfInteger\nfrom OCC.Display.SimpleGui import init_display\ndisplay, start_display, add_menu, add_function_to_menu = init_display()\n\n# the first bezier curve\narray = TColgp_Array1OfPnt(1, 4)\narray.SetValue(1, gp_Pnt(0, 0,0))\narray.SetValue(2, gp_Pnt(1, 0,0))\narray.SetValue(3, gp_Pnt(1, 1,0))\narray.SetValue(4, gp_Pnt(3, 3,0))\nweights=TColStd_Array1OfReal(1,4)\nknots=TColStd_Array1OfReal(1,3)\nmultiplicities=TColStd_Array1OfInteger(1,3)\nmultiplicities.SetValue(1,3)\nmultiplicities.SetValue(2,1)\nmultiplicities.SetValue(3,3)\nknots.SetValue(1,0.0)\nknots.SetValue(2,0.5)\nknots.SetValue(3,1.0)\nweights.SetValue(1,1.0)\nweights.SetValue(2,1.0)\nweights.SetValue(3,1.0)\nweights.SetValue(4,1.0)\n\nnurbs = Geom_BSplineCurve(array,weights,knots,multiplicities,2,False,True )\nprint(nurbs.Period())\ndisplay.DisplayShape(nurbs, update=True, color='RED')\nstart_display()\n","repo_name":"chen1180/CurveEditor_pythonOCC","sub_path":"test/create_geometry/create_nurb.py","file_name":"create_nurb.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"}
+{"seq_id":"71191860403","text":"from __future__ import absolute_import\n\nimport os\n\nfrom celery import Celery\nfrom django.conf import settings\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')\n\napp = Celery('config')\n\napp.conf.beat_schedule = {\n 'notify-every-5-min': {\n 'task': 'team_production_system.tasks.notify',\n 'schedule': 300.0,\n },\n}\n\napp.config_from_object('django.conf:settings', namespace='CELERY')\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n","repo_name":"TeamProductionSystem/Momentors_Backend","sub_path":"config/celery_settings.py","file_name":"celery_settings.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"}
+{"seq_id":"10683363775","text":"from LL import LinkedList, Node\r\ndef printIthNode(head, index):\r\n count = 0\r\n pointer = head\r\n while count < index:\r\n pointer = pointer.next\r\n count += 1\r\n print(\"Value at %dth node: \" % (index), pointer.data)\r\nmyList = LinkedList([1, 2, 3, 4, 5])\r\n\r\nprintIthNode(myList.head, 0)","repo_name":"vardhinialuru05/ALGORITHMS","sub_path":"DSA_INTERVIEW/linkedlists/ithnode.py","file_name":"ithnode.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"8971630689","text":"import sys\nimport os\nfrom pathlib import Path\n\nmyDir = os.getcwd()\nsys.path.append(myDir)\npath = Path(myDir)\nabsolute_path = str(path.parent.absolute())\nsys.path.append(absolute_path)\n\nimport requests\nfrom random import randint\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom src.blockchain.blockchain import Blockchain\nfrom src.blockchain.block import Block\nfrom src.util import is_port_in_use\nfrom src.constant import HOST\n\napp = Flask(__name__)\nCORS(app)\nblockchain = Blockchain()\n\n\n@app.route('/')\ndef index():\n return ''\n\n\n@app.route('/chain')\ndef chain():\n response = {\n 'chain': blockchain.to_json(),\n 'len': len(blockchain.chain)\n }\n return jsonify(response)\n\n\n@app.route('/length')\ndef chain_length():\n return jsonify({'len': len(blockchain.chain)})\n\n\n@app.route('/chain/add/block', methods=['POST'])\ndef add_block():\n response = {}\n\n potential_block_json = request.get_json()\n\n if blockchain.add_block(Block.from_json(potential_block_json)):\n block = blockchain.chain[-1]\n response['block'] = block.to_json()\n response['added'] = True\n else:\n response['block'] = 'Invalid block'\n response['added'] = False\n\n return jsonify(response)\n\n\n@app.route('/chain/broadcast')\ndef broadcast_chain():\n ports = all_other_ports()\n\n bc_response = {\n 'success_ports': [],\n 'fail_ports': []\n }\n\n for port in ports:\n url = f'http://{HOST}:{port}/chain/resolve'\n response = requests.post(url, json=blockchain.to_json())\n if response.status_code == 200:\n if response.json()['success']:\n bc_response['success_ports'].append(port)\n else:\n bc_response['fail_ports'].append(port)\n\n return jsonify(bc_response)\n\n\n@app.route('/chain/resolve', methods=['POST'])\ndef resolve_chain():\n rs_chain_json = request.get_json()\n rs_chain = Blockchain.from_json(rs_chain_json)\n\n response = {\n 'message': f'Current len: {len(blockchain.chain)}\\nIncoming len: {len(rs_chain.chain)}'\n }\n\n try:\n if blockchain.replace_chain(rs_chain):\n response['success'] = True\n else:\n response['success'] = False\n except Exception:\n raise ValueError('Invalid chain')\n\n return jsonify(response)\n\n\ndef all_other_ports():\n url = f'http://{HOST}:8001/ports/all/other/?current_port={server_port}'\n response = requests.get(url)\n if response.status_code == 200:\n return response.json()['ports']\n else:\n print('Cannot connect to ports manager')\n return ''\n\n\nif __name__ == '__main__':\n server_port = randint(5001, 5999)\n\n while is_port_in_use(server_port):\n server_port = randint(5001, 5999)\n\n print('======= register node ========')\n request_url = f'http://{HOST}:8001/ports/add/?port={str(server_port)}'\n requests.post(request_url)\n\n app.run(host=HOST, port=server_port, debug=True, use_reloader=False)\n","repo_name":"zwan0202/double-spend-attack","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
+{"seq_id":"20418172495","text":"\"\"\"\nSensor for Mopar vehicles.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/sensor.mopar/\n\"\"\"\nfrom datetime import timedelta\nimport logging\n\nimport voluptuous as vol\n\nfrom homeassistant.components.sensor import DOMAIN, PLATFORM_SCHEMA\nfrom homeassistant.const import (\n ATTR_ATTRIBUTION, ATTR_COMMAND, CONF_PASSWORD, CONF_PIN, CONF_USERNAME,\n LENGTH_KILOMETERS)\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.util import Throttle\n\nREQUIREMENTS = ['motorparts==1.0.2']\n\n_LOGGER = logging.getLogger(__name__)\n\nATTR_VEHICLE_INDEX = 'vehicle_index'\n\nCOOKIE_FILE = 'mopar_cookies.pickle'\n\nMIN_TIME_BETWEEN_UPDATES = timedelta(days=7)\n\nSERVICE_REMOTE_COMMAND = 'mopar_remote_command'\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_USERNAME): cv.string,\n vol.Required(CONF_PASSWORD): cv.string,\n vol.Required(CONF_PIN): cv.positive_int,\n})\n\nREMOTE_COMMAND_SCHEMA = vol.Schema({\n vol.Required(ATTR_COMMAND): cv.string,\n vol.Required(ATTR_VEHICLE_INDEX): cv.positive_int\n})\n\n\n# pylint: disable=unused-argument\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the Mopar platform.\"\"\"\n import motorparts\n cookie = hass.config.path(COOKIE_FILE)\n try:\n session = motorparts.get_session(\n config.get(CONF_USERNAME), config.get(CONF_PASSWORD),\n config.get(CONF_PIN), cookie_path=cookie)\n except motorparts.MoparError:\n _LOGGER.error(\"Failed to login\")\n return\n\n def _handle_service(service):\n \"\"\"Handle service call.\"\"\"\n index = service.data.get(ATTR_VEHICLE_INDEX)\n command = service.data.get(ATTR_COMMAND)\n try:\n motorparts.remote_command(session, command, index)\n except motorparts.MoparError as error:\n _LOGGER.error(str(error))\n\n hass.services.register(DOMAIN, SERVICE_REMOTE_COMMAND, _handle_service,\n schema=REMOTE_COMMAND_SCHEMA)\n\n data = MoparData(session)\n add_devices([MoparSensor(data, index)\n for index, _ in enumerate(data.vehicles)], True)\n\n\nclass MoparData(object):\n \"\"\"Container for Mopar vehicle data.\n\n Prevents session expiry re-login race condition.\n \"\"\"\n\n def __init__(self, session):\n \"\"\"Initialize data.\"\"\"\n self._session = session\n self.vehicles = []\n self.vhrs = {}\n self.tow_guides = {}\n self.update()\n\n @Throttle(MIN_TIME_BETWEEN_UPDATES)\n def update(self, **kwargs):\n \"\"\"Update data.\"\"\"\n import motorparts\n _LOGGER.info(\"Updating vehicle data\")\n try:\n self.vehicles = motorparts.get_summary(self._session)['vehicles']\n except motorparts.MoparError:\n _LOGGER.exception(\"Failed to get summary\")\n return\n for index, _ in enumerate(self.vehicles):\n try:\n self.vhrs[index] = motorparts.get_report(self._session, index)\n self.tow_guides[index] = motorparts.get_tow_guide(\n self._session, index)\n except motorparts.MoparError:\n _LOGGER.warning(\"Failed to update for vehicle index %s\", index)\n\n\nclass MoparSensor(Entity):\n \"\"\"Mopar vehicle sensor.\"\"\"\n\n def __init__(self, data, index):\n \"\"\"Initialize the sensor.\"\"\"\n self._index = index\n self._vehicle = {}\n self._vhr = {}\n self._tow_guide = {}\n self._odometer = None\n self._data = data\n\n def update(self):\n \"\"\"Update device state.\"\"\"\n self._data.update()\n self._vehicle = self._data.vehicles[self._index]\n self._vhr = self._data.vhrs.get(self._index, {})\n self._tow_guide = 
self._data.tow_guides.get(self._index, {})\n if 'odometer' in self._vhr:\n odo = float(self._vhr['odometer'])\n self._odometer = int(self.hass.config.units.length(\n odo, LENGTH_KILOMETERS))\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return '{} {} {}'.format(\n self._vehicle['year'], self._vehicle['make'],\n self._vehicle['model'])\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._odometer\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the state attributes.\"\"\"\n import motorparts\n attributes = {\n ATTR_VEHICLE_INDEX: self._index,\n ATTR_ATTRIBUTION: motorparts.ATTRIBUTION\n }\n attributes.update(self._vehicle)\n attributes.update(self._vhr)\n attributes.update(self._tow_guide)\n return attributes\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement.\"\"\"\n return self.hass.config.units.length_unit\n\n @property\n def icon(self):\n \"\"\"Return the icon.\"\"\"\n return 'mdi:car'\n","repo_name":"jest-community/jest-pytest","sub_path":"src/__tests__/integration/home-assistant/homeassistant/components/sensor/mopar.py","file_name":"mopar.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"}
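The @Throttle(MIN_TIME_BETWEEN_UPDATES) decorator is what keeps MoparData.update from hitting the remote service more than once per interval; conceptually it behaves like this hand-rolled sketch (an illustration, not the actual Home Assistant implementation):

import time
from datetime import timedelta

def throttle(min_interval: timedelta):
    """Skip calls arriving sooner than min_interval after the last real run."""
    def decorator(func):
        last_run = [float('-inf')]  # closure cell holding the last run time
        def wrapper(*args, **kwargs):
            now = time.monotonic()
            if now - last_run[0] < min_interval.total_seconds():
                return None  # throttled call, nothing happens
            last_run[0] = now
            return func(*args, **kwargs)
        return wrapper
    return decorator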
+{"seq_id":"27271090132","text":"from rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.exceptions import APIException, ParseError\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom openedx.core.djangoapps.schedules.utils import reset_self_paced_schedule\n\n\nclass UnableToResetDeadlines(APIException):\n status_code = 400\n default_detail = 'Unable to reset deadlines.'\n default_code = 'unable_to_reset_deadlines'\n\n\n@permission_classes((IsAuthenticated,))\n@api_view(['POST'])\ndef reset_course_deadlines(request):\n course_key = request.data.get('course_key', None)\n\n # If body doesnt contain 'course_key', return 400 to client.\n if not course_key:\n raise ParseError(\"'course_key' is required.\")\n\n # If body contains params other than 'course_key', return 400 to client.\n if len(request.data) > 1:\n raise ParseError(\"Only 'course_key' is expected.\")\n\n try:\n reset_self_paced_schedule(request.user, course_key)\n return Response({'message': 'Deadlines successfully reset.'})\n except Exception:\n raise UnableToResetDeadlines\n","repo_name":"JimmyKosgei/jimmykkedx","sub_path":"openedx/features/course_experience/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"29541743422","text":"import json\r\nimport gzip\r\nimport array\r\nimport numpy as np\r\n#数据处理第二步,根据5-core的评论生成rating的csv文件\r\n\r\ndef parse(path):\r\n g = gzip.open(path, 'r')\r\n for l in g:\r\n yield eval(l)\r\n\r\ndef readImageAsin(path):\r\n f = open(path, 'rb')\r\n try:\r\n while True:\r\n asin = f.read(10)\r\n if asin == '' : \r\n break\r\n a = array.array('f')\r\n a.fromfile(f, 4096)\r\n yield asin\r\n except EOFError:\r\n pass\r\n\r\ncategory = 'Movies_and_TV'\r\nk_core = '_5'\r\nusers = []\r\nitems = []\r\nratings = []\r\ncount = 0\r\n\r\nfor review in parse(category + \"/reviews_\" + category + k_core +\".json.gz\"):\r\n users.append(review['reviewerID'])\r\n items.append(review['asin'])\r\n ratings.append(review['overall'])\r\n count += 1\r\ndata_amount = count\r\nprint('Read reviews over, data amount = ', data_amount)\r\n\r\nfrating = open(category + \"/ratings_\" + category + k_core + \".csv\", 'w')\r\nfor user, item, rating in zip(users, items, ratings):\r\n frating.write(user + ',' + item + ',' + str(int(rating)) +'\\n')\r\n","repo_name":"XMUDM/Deamer","sub_path":"data_preprocess/generate_rating_5.py","file_name":"generate_rating_5.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
+{"seq_id":"14970936017","text":"\"\"\"\nWrite a function that determines whether an array contain a pair\nof numbers that sum up to a certain integer and returns that pair\nI.e. 10, [3, 4, 1, 2, 8] -> (2, 9)\n\"\"\"\n\n\ndef contains_sum_of(number, array):\n \"\"\"Algo: for every number, we know what the number is that need to be added\n to get a sum. Iretate over the array, add every number in the array to a dictionary,\n and see if the summing number is in the dictionary.\n Complexity: O(n)\n \"\"\"\n pairs = []\n\n # Dictionary of numbers already seen\n numbers_seen = {}\n second_number = None\n for n in array:\n summing_number = number - n\n if summing_number in numbers_seen:\n pairs.append((n, summing_number))\n else:\n numbers_seen[n] = 1\n\n return pairs if pairs else None\n\nprint(contains_sum_of(10, [3, 4, 1, 2, 2, 5 ,5, 8]))\n\nprint(contains_sum_of(10, [3, 4]))\n","repo_name":"ssarber/PythonClass","sub_path":"algorithms/two_number_sum.py","file_name":"two_number_sum.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"27160407317","text":"from osgeo import gdal, ogr\nimport subprocess\n\n#Variables\nOutlets = './static/uploads/Outlet.shp'\nBasin = './static/uploads/Basin.shp'\nSource = './static/uploads/Source.shp'\n\ndef ContaminationFlow():\n \n BASINang = './static/uploads/BASINang.tif'\n BASINdm = './static/uploads/BASINdm.tif'\n SourceR = './static/uploads/SourceR.tif'\n BASINdg = './static/uploads/BASINdg.tif'\n BASINctpt = './static/uploads/BASINctpt.tif'\n BASINnet = './static/uploads/BASINsrcndCrop.tif'\n ContamArea = './static/uploads/ContaminationAreas.tif'\n R1 = './static/uploads/R1.tif'\n R2 = './static/uploads/R2.tif'\n R3 = './static/uploads/R3.tif'\n ContamShp = './static/uploads/ContaminationAreas.shp'\n \n \n raster = gdal.Open('./static/uploads/BASINang.tif')\n gt =raster.GetGeoTransform()\n pixelSizeX = gt[1]\n pixelSizeY =-gt[5]\n print (pixelSizeX)\n print (pixelSizeY)\n minx = gt[0]\n maxy = gt[3]\n maxx = minx + gt[1]*raster.RasterXSize\n miny = maxy + gt[5]*raster.RasterYSize\n print (minx,miny,maxx,maxy)\n \n # Generate Contaminate Flow - including fertilisers\n #Rasterize Source\n cmd1=\"gdal_rasterize -a id -tr '{0}' '{1}' -a_nodata 0.0 -te '{2}' '{3}' '{4}' '{5}' -ot Float32 -of GTiff '{6}' '{7}'\".format(pixelSizeX, pixelSizeY, minx, miny, maxx, maxy, Source, SourceR)\n subprocess.call(cmd1, shell=True)\n #Translate Source\n cmd2=\"gdal_translate -a_nodata -9999 -of GTiff '{0}' '{1}'\".format(SourceR, BASINdg)\n subprocess.call(cmd2, shell=True)\n #Rasterize Basin\n cmd3=\"gdal_rasterize -a fid -tr '{0}' '{1}' -a_nodata 0.0 -te '{2}' '{3}' '{4}' '{5}' -ot Float32 -of GTiff '{6}' '{7}'\".format(pixelSizeX, pixelSizeY, minx, miny, maxx, maxy, Basin, BASINdm) \n subprocess.call(cmd3, shell=True)\n #Execute contamination flow command\n cmd4 = \"mpiexec /usr/local/taudem/dinfconclimaccum -ang '{0}' -dg '{1}' -dm '{2}' -ctpt '{3}' -q '{4}' -csol 1 -o '{5}' -nc\".format(BASINang, BASINdg, BASINdm, BASINctpt, BASINdm, Outlets)\n subprocess.call(cmd4, shell=True)\n \n #Output Contamination Areas\n cmd5=\"gdal_translate -a_nodata -9999 -of GTiff '{0}' '{1}'\".format(BASINctpt, R1)\n subprocess.call(cmd5, shell=True)\n cmd6=\"gdalwarp -of GTiff -cutline '{0}' -crop_to_cutline '{1}' '{2}'\".format(Basin, R1, R2)\n subprocess.call(cmd6, shell=True)\n cmd7=\"gdal_translate -a_nodata -9999 -of GTiff '{0}' '{1}'\".format(R2, R3)\n subprocess.call(cmd7, shell=True)\n cmd8=\"gdal_calc.py --calc 'A*logical_not(A<0)' --format GTiff --type Float32 -A '{0}' --A_band 1 --outfile '{1}'\".format(R3, ContamArea)\n subprocess.call(cmd8, shell=True)\n cmd9=\"gdal_polygonize.py '{0}' -8 -b 1 -f 'ESRI Shapefile' '{1}'\".format(ContamArea, ContamShp)\n subprocess.call(cmd9, shell=True)\n \n shapefile = ogr.Open(ContamShp, 1)\n \n layer = shapefile.GetLayer()\n layer.SetAttributeFilter(\"DN = 0\")\n \n for feat in layer:\n print (feat.GetField(\"DN\"))\n layer.DeleteFeature(feat.GetFID())\n\n\nif __name__ == '__main__':\n\n ContaminationFlow()\n","repo_name":"jrc15/geomFlaskApp","sub_path":"FlaskApp/HydroContamination.py","file_name":"HydroContamination.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"19438509610","text":"# -*- coding: utf-8 -*-\r\nimport shutil\r\nimport dlib # 人脸识别的库dlib\r\nimport numpy as np # 数据处理的库numpy\r\nimport wx # 构造显示界面的GUI\r\nimport wx.xrc\r\nimport wx.adv\r\n# import the necessary packages\r\nfrom scipy.spatial import distance as dist\r\nfrom imutils.video import FileVideoStream\r\nfrom imutils.video import VideoStream\r\nfrom imutils import face_utils\r\nimport argparse\r\nimport imutils\r\nimport datetime, time\r\nimport math\r\nimport os\r\nimport pandas as pd\r\nimport winsound # 系统音效\r\nfrom playsound import playsound # 音频播放\r\nimport csv # 存入表格\r\nimport time\r\nimport sys\r\nimport numpy as np # 数据处理的库 numpy\r\nfrom cv2 import cv2 as cv2 # 图像处理的库 OpenCv\r\nimport pandas as pd # 数据处理的库 Pandas\r\nfrom skimage import io\r\nimport socket\r\nimport codecs\r\n## Class Fatigue_detecting\r\n###########################################################################\r\n\r\nCOVER = 'G:/pycharm project/python project/face detecting/images/camera.png'\r\n\r\n\r\nfacerec = dlib.face_recognition_model_v1(\r\n \"G:/pycharm project/python project/face detecting/model/dlib_face_recognition_resnet_model_v1.dat\")\r\n# 用来存放所有录入人脸特征的数组\r\n# the array to save the features of faces in the database\r\nfeatures_known_arr = []\r\n\r\ndetector = dlib.get_frontal_face_detector()\r\n\r\nface_rec = dlib.face_recognition_model_v1(\"G:/pycharm project/python project/face detecting/model/dlib_face_recognition_resnet_model_v1.dat\")\r\n\r\npredictor = dlib.shape_predictor(\"G:/pycharm project/python project/face detecting/model/shape_predictor_68_face_landmarks.dat\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n# \"\"\"\r\n# client\r\n# connect()\r\n# recv()\r\n# send()\r\n# sendall()\r\n# \"\"\"\r\n# # 创建套接字,绑定套接字到本地IP与端口\r\n# sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n# # address = ('10.1.156.82', 8001)\r\n# sk.connect(('10.1.156.82', 8001))\r\n# inp = '030300000002c5e9'\r\n# while True:\r\n# if inp == 'exit':\r\n# print(\"exit\")\r\n# break\r\n# # 默认编码为十六进制编码\r\n# sk.send(codecs.decode(inp, 'hex'))\r\n# # 每2秒读取以此数据\r\n# time.sleep(2)\r\n# # 每次接受1024字节的数据\r\n# result = sk.recv(1024)\r\n# result = codecs.encode(result, 'hex')\r\n# r = bytes(result).decode('utf-8')\r\n# shidu = int(r[6:10], 16) / 100\r\n# wendu = int(r[10:14], 16) / 100\r\n# print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n# print(\"温度:%s,湿度:%s\\n\" % (wendu, shidu))\r\nsk.close()\r\ndef return_128d_features(path_img):\r\n im_rd = io.imread(path_img)\r\n img_gray = cv2.cvtColor(im_rd, cv2.COLOR_BGR2RGB)\r\n faces = detector(img_gray, 1)\r\n\r\n print(\"%-40s %-20s\" % (\"检测到人脸的图像 / image with faces detected:\", path_img), '\\n')\r\n\r\n # 因为有可能截下来的人脸再去检测,检测不出来人脸了\r\n # 所以要确保是 检测到人脸的人脸图像 拿去算特征\r\n if len(faces) != 0:\r\n shape = predictor(img_gray, faces[0])\r\n face_descriptor = face_rec.compute_face_descriptor(img_gray, shape)\r\n else:\r\n face_descriptor = 0\r\n print(\"no face\")\r\n\r\n return face_descriptor\r\n\r\ndef return_features_mean_personX(path_faces_personX):\r\n features_list_personX = []\r\n photos_list = os.listdir(path_faces_personX)\r\n if photos_list:\r\n for i in range(len(photos_list)):\r\n # 调用return_128d_features()得到128d特征\r\n print(\"%-40s %-20s\" % (\"正在读的人脸图像 / image to read:\", path_faces_personX + \"/\" + photos_list[i]))\r\n features_128d = return_128d_features(path_faces_personX + \"/\" + photos_list[i])\r\n # print(features_128d)\r\n # 遇到没有检测出人脸的图片跳过\r\n if features_128d == 0:\r\n i += 1\r\n else:\r\n features_list_personX.append(features_128d)\r\n else:\r\n 
print(\"文件夹内图像文件为空 / Warning: No images in \" + path_faces_personX + '/', '\\n')\r\n\r\n # 计算 128D 特征的均值\r\n # N x 128D -> 1 x 128D\r\n if features_list_personX:\r\n features_mean_personX = np.array(features_list_personX).mean(axis=0)\r\n else:\r\n features_mean_personX = '0'\r\n\r\n return features_mean_personX\r\npath_images_from_camera = \"G:/pycharm project/python project/face detecting/pictures/people/\"\r\nclass Fatigue_detecting(wx.Frame):\r\n\r\n def __init__(self, parent, title):\r\n wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=title, pos=wx.DefaultPosition, size=wx.Size(873, 535),\r\n style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)\r\n # wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=title, pos=wx.DefaultPosition, size=wx.Size(900, 700),style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)\r\n self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)\r\n self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENU))\r\n\r\n bSizer1 = wx.BoxSizer(wx.VERTICAL)\r\n bSizer2 = wx.BoxSizer(wx.HORIZONTAL)\r\n bSizer3 = wx.BoxSizer(wx.VERTICAL)\r\n\r\n self.m_animCtrl1 = wx.adv.AnimationCtrl(self, wx.ID_ANY, wx.adv.NullAnimation, wx.DefaultPosition,\r\n wx.DefaultSize, wx.adv.AC_DEFAULT_STYLE)\r\n bSizer3.Add(self.m_animCtrl1, 1, wx.ALL | wx.EXPAND, 5)\r\n bSizer2.Add(bSizer3, 9, wx.EXPAND, 5)\r\n bSizer4 = wx.BoxSizer(wx.VERTICAL)\r\n sbSizer1 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, u\"parameters setting\"), wx.VERTICAL)\r\n sbSizer2 = wx.StaticBoxSizer(wx.StaticBox(sbSizer1.GetStaticBox(), wx.ID_ANY, u\"video source\"), wx.VERTICAL)\r\n gSizer1 = wx.GridSizer(0, 2, 0, 8)\r\n m_choice1Choices = [u\"camera_0\", u\"camera_1\", u\"camera_2\"]\r\n self.m_choice1 = wx.Choice(sbSizer2.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size(90, 25),\r\n m_choice1Choices, 0)\r\n self.m_choice1.SetSelection(0)\r\n gSizer1.Add(self.m_choice1, 0, wx.ALL, 5)\r\n self.camera_button1 = wx.Button(sbSizer2.GetStaticBox(), wx.ID_ANY, u\"start detecting\", wx.DefaultPosition,\r\n wx.Size(90, 25), 0)\r\n gSizer1.Add(self.camera_button1, 0, wx.ALL, 5)\r\n self.vedio_button2 = wx.Button(sbSizer2.GetStaticBox(), wx.ID_ANY, u\"open video file\", wx.DefaultPosition,\r\n wx.Size(90, 25), 0)\r\n gSizer1.Add(self.vedio_button2, 0, wx.ALL, 5)\r\n\r\n self.off_button3 = wx.Button(sbSizer2.GetStaticBox(), wx.ID_ANY, u\"pause\", wx.DefaultPosition, wx.Size(90, 25), 0)\r\n gSizer1.Add(self.off_button3, 0, wx.ALL, 5)\r\n sbSizer2.Add(gSizer1, 1, wx.EXPAND, 5)\r\n sbSizer1.Add(sbSizer2, 2, wx.EXPAND, 5)\r\n\r\n self.information_button1 = wx.Button(sbSizer2.GetStaticBox(), wx.ID_ANY, u\"upload \",\r\n wx.DefaultPosition,\r\n wx.Size(90, 25), 0)\r\n gSizer1.Add(self.information_button1, 0, wx.ALL, 5)\r\n\r\n self.information_button2 = wx.Button(sbSizer2.GetStaticBox(), wx.ID_ANY, u\"update \",\r\n wx.DefaultPosition,\r\n wx.Size(90, 25), 0)\r\n gSizer1.Add(self.information_button2, 0, wx.ALL, 5)\r\n\r\n sbSizer3 = wx.StaticBoxSizer(wx.StaticBox(sbSizer1.GetStaticBox(), wx.ID_ANY, u\"fatigue detecting\"), wx.VERTICAL)\r\n bSizer5 = wx.BoxSizer(wx.HORIZONTAL)\r\n self.yawn_checkBox1 = wx.CheckBox(sbSizer3.GetStaticBox(), wx.ID_ANY, u\"yawning detecting\", wx.Point(-1, -1),\r\n wx.Size(-1, 15), 0)\r\n self.yawn_checkBox1.SetValue(True)\r\n bSizer5.Add(self.yawn_checkBox1, 0, wx.ALL, 5)\r\n self.blink_checkBox2 = wx.CheckBox(sbSizer3.GetStaticBox(), wx.ID_ANY, u\"blinking detecting\", wx.Point(-1, -1),\r\n wx.Size(-1, 15), 0)\r\n self.blink_checkBox2.SetValue(True)\r\n bSizer5.Add(self.blink_checkBox2, 0, 
wx.ALL, 5)\r\n sbSizer3.Add(bSizer5, 1, wx.EXPAND, 5)\r\n bSizer6 = wx.BoxSizer(wx.HORIZONTAL)\r\n self.nod_checkBox7 = wx.CheckBox(sbSizer3.GetStaticBox(), wx.ID_ANY, u\"nodding detecting\", wx.Point(-1, -1), wx.Size(-1, 15),\r\n 0)\r\n self.nod_checkBox7.SetValue(True)\r\n bSizer6.Add(self.nod_checkBox7, 0, wx.ALL, 5)\r\n self.m_staticText1 = wx.StaticText(sbSizer3.GetStaticBox(), wx.ID_ANY, u\"testing interval(s):\", wx.DefaultPosition,\r\n wx.Size(-1, 15), 0)\r\n self.m_staticText1.Wrap(-1)\r\n bSizer6.Add(self.m_staticText1, 0, wx.ALL, 5)\r\n m_listBox2Choices = [u\"3\", u\"4\", u\"5\", u\"6\", u\"7\", u\"8\"]\r\n self.m_listBox2 = wx.ListBox(sbSizer3.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size(50, 24),\r\n m_listBox2Choices, 0)\r\n bSizer6.Add(self.m_listBox2, 0, 0, 5)\r\n sbSizer3.Add(bSizer6, 1, wx.EXPAND, 5)\r\n sbSizer1.Add(sbSizer3, 2, 0, 5)\r\n sbSizer4 = wx.StaticBoxSizer(wx.StaticBox(sbSizer1.GetStaticBox(), wx.ID_ANY, u\"absences detecting\"), wx.VERTICAL)\r\n bSizer8 = wx.BoxSizer(wx.HORIZONTAL)\r\n self.m_checkBox4 = wx.CheckBox(sbSizer4.GetStaticBox(), wx.ID_ANY, u\"absences detecting\", wx.DefaultPosition, wx.Size(-1, 15),\r\n 0)\r\n self.m_checkBox4.SetValue(True)\r\n bSizer8.Add(self.m_checkBox4, 0, wx.ALL, 5)\r\n self.m_staticText2 = wx.StaticText(sbSizer4.GetStaticBox(), wx.ID_ANY, u\"absences interval(s):\", wx.DefaultPosition,\r\n wx.Size(-1, 15), 0)\r\n self.m_staticText2.Wrap(-1)\r\n bSizer8.Add(self.m_staticText2, 0, wx.ALL, 5)\r\n m_listBox21Choices = [u\"5\", u\"10\", u\"15\", u\"20\", u\"25\", u\"30\"]\r\n self.m_listBox21 = wx.ListBox(sbSizer4.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size(50, 24),\r\n m_listBox21Choices, 0)\r\n bSizer8.Add(self.m_listBox21, 0, 0, 5)\r\n sbSizer4.Add(bSizer8, 1, 0, 5)\r\n sbSizer1.Add(sbSizer4, 1, 0, 5)\r\n #sbSizer5 = wx.StaticBoxSizer(wx.StaticBox(sbSizer1.GetStaticBox(), wx.ID_ANY, u\"analysis area\"), wx.VERTICAL)\r\n bSizer9 = wx.BoxSizer(wx.HORIZONTAL)\r\n #self.m_staticText3 = wx.StaticText(sbSizer5.GetStaticBox(), wx.ID_ANY, u\"analysis area: \", wx.DefaultPosition,\r\n #wx.DefaultSize, 0)\r\n #self.m_staticText3.Wrap(-1)\r\n #bSizer9.Add(self.m_staticText3, 0, wx.ALL, 5)\r\n #m_choice2Choices = [u\"full screen\", u\"part of screen\"]\r\n #self.m_choice2 = wx.Choice(sbSizer5.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,\r\n #m_choice2Choices, 0)\r\n #self.m_choice2.SetSelection(0)\r\n #bSizer9.Add(self.m_choice2, 0, wx.ALL, 5)\r\n #sbSizer5.Add(bSizer9, 1, wx.EXPAND, 5)\r\n #sbSizer1.Add(sbSizer5, 1, 0, 5)\r\n sbSizer6 = wx.StaticBoxSizer(wx.StaticBox(sbSizer1.GetStaticBox(), wx.ID_ANY, u\"status output\"), wx.VERTICAL)\r\n self.m_textCtrl3 = wx.TextCtrl(sbSizer6.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition,\r\n wx.DefaultSize, wx.TE_MULTILINE | wx.TE_READONLY)\r\n sbSizer6.Add(self.m_textCtrl3, 1, wx.ALL | wx.EXPAND, 5)\r\n sbSizer1.Add(sbSizer6, 5, wx.EXPAND, 5)\r\n bSizer4.Add(sbSizer1, 1, wx.EXPAND, 5)\r\n bSizer2.Add(bSizer4, 3, wx.EXPAND, 5)\r\n bSizer1.Add(bSizer2, 1, wx.EXPAND, 5)\r\n\r\n self.SetSizer(bSizer1)\r\n self.Layout()\r\n self.Centre(wx.BOTH)\r\n\r\n # Connect Events\r\n self.m_choice1.Bind(wx.EVT_CHOICE, self.cameraid_choice) # 绑定事件\r\n self.camera_button1.Bind(wx.EVT_BUTTON, self.camera_on) # 开\r\n self.vedio_button2.Bind(wx.EVT_BUTTON, self.vedio_on)\r\n self.off_button3.Bind(wx.EVT_BUTTON, self.off) # 关\r\n self.information_button1.Bind(wx.EVT_BUTTON, self.upload)\r\n self.information_button2.Bind(wx.EVT_BUTTON, self.update)\r\n\r\n 
self.m_listBox2.Bind(wx.EVT_LISTBOX, self.AR_CONSEC_FRAMES) # 闪烁阈值设置\r\n self.m_listBox21.Bind(wx.EVT_LISTBOX, self.OUT_AR_CONSEC_FRAMES) # 脱岗时间设置\r\n\r\n # 封面图片\r\n self.image_cover = wx.Image(COVER, wx.BITMAP_TYPE_ANY)\r\n # 显示图片在m_animCtrl1上\r\n self.bmp = wx.StaticBitmap(self.m_animCtrl1, -1, wx.Bitmap(self.image_cover))\r\n\r\n # 设置窗口标题的图标\r\n self.icon = wx.Icon('./images/123.ico', wx.BITMAP_TYPE_ICO)\r\n self.SetIcon(self.icon)\r\n # 系统事件\r\n self.Bind(wx.EVT_CLOSE, self.OnClose)\r\n print(\"wxpython interface initialization is complete!\")\r\n \"\"\"参数\"\"\"\r\n # 默认为摄像头0\r\n self.VIDEO_STREAM = 0\r\n self.CAMERA_STYLE = False # False未打开摄像头,True摄像头已打开\r\n # 闪烁阈值(秒)\r\n self.AR_CONSEC_FRAMES_check = 3\r\n self.OUT_AR_CONSEC_FRAMES_check = 5\r\n # 眼睛长宽比\r\n self.EYE_AR_THRESH = 0.2\r\n self.EYE_AR_CONSEC_FRAMES = self.AR_CONSEC_FRAMES_check\r\n # 打哈欠长宽比\r\n self.MAR_THRESH = 0.5\r\n self.MOUTH_AR_CONSEC_FRAMES = self.AR_CONSEC_FRAMES_check\r\n # 瞌睡点头\r\n self.HAR_THRESH = 0.3\r\n self.NOD_AR_CONSEC_FRAMES = self.AR_CONSEC_FRAMES_check\r\n\r\n \"\"\"计数\"\"\"\r\n # 初始化帧计数器和眨眼总数\r\n self.COUNTER = 0\r\n self.TOTAL = 0\r\n # 初始化帧计数器和打哈欠总数\r\n self.mCOUNTER = 0\r\n self.mTOTAL = 0\r\n # 初始化帧计数器和点头总数\r\n self.hCOUNTER = 0\r\n self.hTOTAL = 0\r\n # 离职时间长度\r\n self.oCOUNTER = 0\r\n\r\n \"\"\"姿态\"\"\"\r\n # 世界坐标系(UVW):填写3D参考点,该模型参考http://aifi.isr.uc.pt/Downloads/OpenGL/glAnthropometric3DModel.cpp\r\n self.object_pts = np.float32([[6.825897, 6.760612, 4.402142], # 33左眉左上角\r\n [1.330353, 7.122144, 6.903745], # 29左眉右角\r\n [-1.330353, 7.122144, 6.903745], # 34右眉左角\r\n [-6.825897, 6.760612, 4.402142], # 38右眉右上角\r\n [5.311432, 5.485328, 3.987654], # 13左眼左上角\r\n [1.789930, 5.393625, 4.413414], # 17左眼右上角\r\n [-1.789930, 5.393625, 4.413414], # 25右眼左上角\r\n [-5.311432, 5.485328, 3.987654], # 21右眼右上角\r\n [2.005628, 1.409845, 6.165652], # 55鼻子左上角\r\n [-2.005628, 1.409845, 6.165652], # 49鼻子右上角\r\n [2.774015, -2.080775, 5.048531], # 43嘴左上角\r\n [-2.774015, -2.080775, 5.048531], # 39嘴右上角\r\n [0.000000, -3.116408, 6.097667], # 45嘴中央下角\r\n [0.000000, -7.415691, 4.070434]]) # 6下巴角\r\n\r\n # 相机坐标系(XYZ):添加相机内参\r\n self.K = [6.5308391993466671e+002, 0.0, 3.1950000000000000e+002,\r\n 0.0, 6.5308391993466671e+002, 2.3950000000000000e+002,\r\n 0.0, 0.0, 1.0] # 等价于矩阵[fx, 0, cx; 0, fy, cy; 0, 0, 1]\r\n # 图像中心坐标系(uv):相机畸变参数[k1, k2, p1, p2, k3]\r\n self.D = [7.0834633684407095e-002, 6.9140193737175351e-002, 0.0, 0.0, -1.3073460323689292e+000]\r\n\r\n # 像素坐标系(xy):填写凸轮的本征和畸变系数\r\n self.cam_matrix = np.array(self.K).reshape(3, 3).astype(np.float32)\r\n self.dist_coeffs = np.array(self.D).reshape(5, 1).astype(np.float32)\r\n\r\n # 重新投影3D点的世界坐标轴以验证结果姿势\r\n self.reprojectsrc = np.float32([[10.0, 10.0, 10.0],\r\n [10.0, 10.0, -10.0],\r\n [10.0, -10.0, -10.0],\r\n [10.0, -10.0, 10.0],\r\n [-10.0, 10.0, 10.0],\r\n [-10.0, 10.0, -10.0],\r\n [-10.0, -10.0, -10.0],\r\n [-10.0, -10.0, 10.0]])\r\n # 绘制正方体12轴\r\n self.line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],\r\n [4, 5], [5, 6], [6, 7], [7, 4],\r\n [0, 4], [1, 5], [2, 6], [3, 7]]\r\n\r\n def __del__(self):\r\n pass\r\n\r\n def get_head_pose(self, shape): # 头部姿态估计\r\n # (像素坐标集合)填写2D参考点,注释遵循https://ibug.doc.ic.ac.uk/resources/300-W/\r\n # 17左眉左上角/21左眉右角/22右眉左上角/26右眉右上角/36左眼左上角/39左眼右上角/42右眼左上角/\r\n # 45右眼右上角/31鼻子左上角/35鼻子右上角/48左上角/54嘴右上角/57嘴中央下角/8下巴角\r\n image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],\r\n shape[39], shape[42], shape[45], shape[31], shape[35],\r\n shape[48], shape[54], shape[57], shape[8]])\r\n # solvePnP计算姿势——求解旋转和平移矩阵:\r\n # 
rotation_vec表示旋转矩阵,translation_vec表示平移矩阵,cam_matrix与K矩阵对应,dist_coeffs与D矩阵对应。\r\n _, rotation_vec, translation_vec = cv2.solvePnP(self.object_pts, image_pts, self.cam_matrix, self.dist_coeffs)\r\n # projectPoints重新投影误差:原2d点和重投影2d点的距离(输入3d点、相机内参、相机畸变、r、t,输出重投影2d点)\r\n reprojectdst, _ = cv2.projectPoints(self.reprojectsrc, rotation_vec, translation_vec, self.cam_matrix,\r\n self.dist_coeffs)\r\n reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2))) # 以8行2列显示\r\n\r\n # 计算欧拉角calc euler angle\r\n # 参考https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#decomposeprojectionmatrix\r\n rotation_mat, _ = cv2.Rodrigues(rotation_vec) # 罗德里格斯公式(将旋转矩阵转换为旋转向量)\r\n pose_mat = cv2.hconcat((rotation_mat, translation_vec)) # 水平拼接,vconcat垂直拼接\r\n # decomposeProjectionMatrix将投影矩阵分解为旋转矩阵和相机矩阵\r\n _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)\r\n\r\n pitch, yaw, roll = [math.radians(_) for _ in euler_angle]\r\n\r\n pitch = math.degrees(math.asin(math.sin(pitch)))\r\n roll = -math.degrees(math.asin(math.sin(roll)))\r\n yaw = math.degrees(math.asin(math.sin(yaw)))\r\n # print('pitch:{}, yaw:{}, roll:{}'.format(pitch, yaw, roll))\r\n\r\n return reprojectdst, euler_angle # 投影误差,欧拉角\r\n\r\n def eye_aspect_ratio(self, eye):\r\n # 垂直眼标志(X,Y)坐标\r\n A = dist.euclidean(eye[1], eye[5]) # 计算两个集合之间的欧式距离\r\n B = dist.euclidean(eye[2], eye[4])\r\n # 计算水平之间的欧几里得距离\r\n # 水平眼标志(X,Y)坐标\r\n C = dist.euclidean(eye[0], eye[3])\r\n # 眼睛长宽比的计算\r\n ear = (A + B) / (2.0 * C)\r\n # 返回眼睛的长宽比\r\n return ear\r\n\r\n def mouth_aspect_ratio(self, mouth): # 嘴部\r\n A = np.linalg.norm(mouth[2] - mouth[9]) # 51, 59\r\n B = np.linalg.norm(mouth[4] - mouth[7]) # 53, 57\r\n C = np.linalg.norm(mouth[0] - mouth[6]) # 49, 55\r\n mar = (A + B) / (2.0 * C)\r\n return mar\r\n\r\n # 处理存放所有人脸特征的 csv\r\n path_features_known_csv = \"G:/pycharm project/python project/face detecting/1111.csv\"\r\n csv_rd = pd.read_csv(path_features_known_csv, header=None)\r\n\r\n\r\n # 读取已知人脸数据\r\n # print known faces\r\n for i in range(csv_rd.shape[0]):\r\n features_someone_arr = []\r\n for j in range(0, len(csv_rd.iloc[i, :])):\r\n features_someone_arr.append(csv_rd.iloc[i, :][j])\r\n features_known_arr.append(features_someone_arr)\r\n print(\"Faces in Database:\", len(features_known_arr))\r\n\r\n def _learning_face(self, event):\r\n \"\"\"dlib的初始化调用\"\"\"\r\n # 使用人脸检测器get_frontal_face_detector\r\n self.detector = dlib.get_frontal_face_detector()\r\n # dlib的68点模型,使用作者训练好的特征预测器\r\n self.predictor = dlib.shape_predictor(\r\n \"G:/pycharm project/python project/face detecting/model/shape_predictor_68_face_landmarks.dat\")\r\n self.m_textCtrl3.AppendText(u\"Loading model successfully!!\\n\")\r\n # 分别获取左右眼面部标志的索引\r\n (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\r\n (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\r\n (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"mouth\"]\r\n\r\n # 建cv2摄像头对象,这里使用电脑自带摄像头,如果接了外部摄像头,则自动切换到外部摄像头\r\n self.cap = cv2.VideoCapture(self.VIDEO_STREAM)\r\n\r\n if self.cap.isOpened() == True: # 返回true/false 检查初始化是否成功\r\n self.CAMERA_STYLE = True\r\n self.m_textCtrl3.AppendText(u\"Open the camera successfully!!\\n\")\r\n time_start = time.time()\r\n else:\r\n self.m_textCtrl3.AppendText(u\"Fail to open the camera!!\\n\")\r\n # 显示封面图\r\n self.bmp.SetBitmap(wx.Bitmap(self.image_cover))\r\n # 成功打开视频,循环读取视频流\r\n while (self.cap.isOpened()):\r\n # cap.read()\r\n # 返回两个值:\r\n # 一个布尔值true/false,用来判断读取视频是否成功/是否到视频末尾\r\n # 图像对象,图像的三维矩阵\r\n flag, im_rd = 
self.cap.read()\r\n kk = cv2.waitKey(1)\r\n # 取灰度\r\n img_gray = cv2.cvtColor(im_rd, cv2.COLOR_RGB2GRAY)\r\n\r\n # 使用人脸检测器检测每一帧图像中的人脸。并返回人脸数faces\r\n faces = self.detector(img_gray, 0)\r\n\r\n # 待会要写的字体 font to write later\r\n font = cv2.FONT_HERSHEY_COMPLEX\r\n\r\n # 存储当前摄像头中捕获到的所有人脸的坐标/名字\r\n # the list to save the positions and names of current faces captured\r\n pos_namelist = []\r\n name_namelist = []\r\n\r\n # 计算两个128D向量间的欧式距离\r\n # compute the e-distance between two 128D features\r\n def return_euclidean_distance(feature_1, feature_2):\r\n feature_1 = np.array(feature_1)\r\n feature_2 = np.array(feature_2)\r\n dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))\r\n return dist\r\n # 如果检测到人脸\r\n if (len(faces) != 0):\r\n # enumerate方法同时返回数据对象的索引和数据,k为索引,d为faces中的对象\r\n features_cap_arr = []\r\n for k, d in enumerate(faces):\r\n # 用红色矩形框出人脸\r\n cv2.rectangle(im_rd, (d.left(), d.top()), (d.right(), d.bottom()), (0, 0, 255), 1)\r\n # 使用预测器得到68点数据的坐标\r\n shape = self.predictor(im_rd, d)\r\n features_cap_arr.append(facerec.compute_face_descriptor(im_rd, shape))\r\n # 圆圈显示每个特征点\r\n for i in range(68):\r\n cv2.circle(im_rd, (shape.part(i).x, shape.part(i).y), 2, (0, 255, 0), -1, 8)\r\n for k in range(len(faces)):\r\n print(\"##### camera person\", k + 1, \"#####\")\r\n # 让人名跟随在矩形框的下方\r\n # 确定人名的位置坐标\r\n # 先默认所有人不认识,是 unknown\r\n # set the default names of faces with \"unknown\"\r\n name_namelist.append(\"unknown\")\r\n\r\n # 每个捕获人脸的名字坐标 the positions of faces captured\r\n pos_namelist.append(\r\n tuple([faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))\r\n\r\n # 对于某张人脸,遍历所有存储的人脸特征\r\n # for every faces detected, compare the faces in the database\r\n e_distance_list = []\r\n for i in range(len(features_known_arr)):\r\n # 如果 person_X 数据不为空\r\n if str(features_known_arr[i][0]) != '0.0':\r\n print(\"with person\", str(i + 1), \"the e distance: \", end='')\r\n e_distance_tmp = return_euclidean_distance(features_cap_arr[k], features_known_arr[i])\r\n print(e_distance_tmp)\r\n e_distance_list.append(e_distance_tmp)\r\n else:\r\n # 空数据 person_X\r\n e_distance_list.append(999999999)\r\n # 找出最接近的一个人脸数据是第几个\r\n # Find the one with minimum e distance\r\n similar_person_num = e_distance_list.index(min(e_distance_list))\r\n print(\"Minimum e distance with person\", int(similar_person_num) + 1)\r\n\r\n # 计算人脸识别特征与数据集特征的欧氏距离\r\n # 距离小于0.4则标出为可识别人物\r\n if min(e_distance_list) < 0.4:\r\n # 这里可以修改摄像头中标出的人名\r\n # Here you can modify the names shown on the camera\r\n # 1、遍历文件夹目���\r\n folder_name = 'G:/pycharm project/python project/face detecting/pictures/people'\r\n # 最接近的人脸\r\n sum = similar_person_num + 1\r\n key_id = 1 # 从第一个人脸数据文件夹进行对比\r\n # 获取文件夹中的文件名:1wang、2zhou、3...\r\n file_names = os.listdir(folder_name)\r\n for name in file_names:\r\n # print(name+'->'+str(key_id))\r\n if sum == key_id:\r\n # winsound.Beep(300,500)# 响铃:300频率,500持续时间\r\n name_namelist[k] = name[1:] # 人名删去第一个数字(用于视频输出标识)\r\n key_id += 1\r\n # 播放欢迎光临音效\r\n # playsound('D:/myworkspace/JupyterNotebook/People/music/welcome.wav')\r\n # print(\"May be person \"+str(int(similar_person_num)+1))\r\n # -----------筛选出人脸并保存到visitor文件夹------------\r\n for i, d in enumerate(faces):\r\n x1 = d.top() if d.top() > 0 else 0\r\n y1 = d.bottom() if d.bottom() > 0 else 0\r\n x2 = d.left() if d.left() > 0 else 0\r\n y2 = d.right() if d.right() > 0 else 0\r\n face = im_rd[x1:y1, x2:y2]\r\n size = 64\r\n face = cv2.resize(face, (size, size))\r\n # 要存储visitor人脸图像文件的路径\r\n # path_visitors_save_dir = 
\"D:/myworkspace/JupyterNotebook/People/visitor/known\"\r\n path_visitors_save_dir = \"G:/pycharm project/python project/face detecting/pictures/people/visitors/known\"\r\n # 存储格式:2019-06-24-14-33-40wang.jpg\r\n now_time = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\r\n save_name = str(now_time) + str(name_namelist[k]) + '.jpg'\r\n # print(save_name)\r\n # 本次图片保存的完整url\r\n save_path = path_visitors_save_dir + '/' + save_name\r\n # 遍历visitor文件夹所有文件名\r\n visitor_names = os.listdir(path_visitors_save_dir)\r\n visitor_name = ''\r\n for name in visitor_names:\r\n # 名字切片到分钟数:2019-06-26-11-33-00wangyu.jpg\r\n visitor_name = (name[0:16] + '-00' + name[19:])\r\n # print(visitor_name)\r\n visitor_save = (save_name[0:16] + '-00' + save_name[19:])\r\n # print(visitor_save)\r\n # 一分钟之内重复的人名不保存\r\n if visitor_save != visitor_name:\r\n cv2.imwrite(save_path, face)\r\n print(\r\n '新存储:' + path_visitors_save_dir + '/' + str(now_time) + str(\r\n name_namelist[k]) + '.jpg')\r\n else:\r\n print('重复,未保存!')\r\n\r\n else:\r\n # 播放无法识别音效\r\n # playsound('D:/myworkspace/JupyterNotebook/People/music/sorry.wav')\r\n print(\"Unknown person\")\r\n # -----保存图片-------\r\n # -----------筛选出人脸并保存到visitor文件夹------------\r\n for i, d in enumerate(faces):\r\n x1 = d.top() if d.top() > 0 else 0\r\n y1 = d.bottom() if d.bottom() > 0 else 0\r\n x2 = d.left() if d.left() > 0 else 0\r\n y2 = d.right() if d.right() > 0 else 0\r\n face = im_rd[x1:y1, x2:y2]\r\n size = 64\r\n face = cv2.resize(face, (size, size))\r\n # 要存储visitor-》unknown人脸图像文件的路径\r\n path_visitors_save_dir = \"G:/pycharm project/python project/face detecting/pictures/people/visitors/unknown\"\r\n # 存储格式:2019-06-24-14-33-40unknown.jpg\r\n now_time = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\r\n # print(save_name)\r\n # 本次图片保存的完整url\r\n save_path = path_visitors_save_dir + '/' + str(now_time) + 'unknown.jpg'\r\n cv2.imwrite(save_path, face)\r\n print('新存储:' + path_visitors_save_dir + '/' + str(now_time) + 'unknown.jpg')\r\n # 在人脸框下面写人脸名字\r\n # write names under rectangle\r\n for i in range(len(faces)):\r\n cv2.putText(im_rd, name_namelist[i], pos_namelist[i], font, 0.8, (0, 255, 255), 1,cv2.LINE_AA)\r\n\r\n print(\"Faces in camera now:\", name_namelist, \"\\n\")\r\n # 将脸部特征信息转换为数组array的格式\r\n shape = face_utils.shape_to_np(shape)\r\n \"\"\"\r\n 打哈欠\r\n \"\"\"\r\n if self.yawn_checkBox1.GetValue() == True:\r\n # 嘴巴坐标\r\n mouth = shape[mStart:mEnd]\r\n # 打哈欠\r\n mar = self.mouth_aspect_ratio(mouth)\r\n # 使用cv2.convexHull获得凸包位置,使用drawContours画出轮廓位置进行画图操作\r\n mouthHull = cv2.convexHull(mouth)\r\n cv2.drawContours(im_rd, [mouthHull], -1, (0, 255, 0), 1)\r\n # 同理,判断是否打哈欠\r\n if mar > self.MAR_THRESH: # 张嘴阈值0.5\r\n self.mCOUNTER += 1\r\n else:\r\n # 如果连续3次都小于阈值,则表示打了一次哈欠\r\n if self.mCOUNTER >= self.MOUTH_AR_CONSEC_FRAMES: # 阈值:3\r\n self.mTOTAL += 1\r\n # 显示\r\n cv2.putText(im_rd, \"Yawning!\", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\r\n self.m_textCtrl3.AppendText(\r\n time.strftime('%Y-%m-%d %H:%M ', time.localtime()) + u\"yawn!!!\\n\")\r\n # 重置嘴帧计数器\r\n self.mCOUNTER = 0\r\n cv2.putText(im_rd, \"COUNTER: {}\".format(self.mCOUNTER), (150, 60), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.7, (0, 0, 255), 2)\r\n cv2.putText(im_rd, \"MAR: {:.2f}\".format(mar), (300, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n cv2.putText(im_rd, \"Yawning: {}\".format(self.mTOTAL), (450, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (255, 255, 0), 2)\r\n else:\r\n pass\r\n \"\"\"\r\n 眨眼\r\n \"\"\"\r\n if self.blink_checkBox2.GetValue() == True:\r\n # 
提取左眼和右眼坐标\r\n leftEye = shape[lStart:lEnd]\r\n rightEye = shape[rStart:rEnd]\r\n # 构造函数计算左右眼的EAR值,使用平均值作为最终的EAR\r\n leftEAR = self.eye_aspect_ratio(leftEye)\r\n rightEAR = self.eye_aspect_ratio(rightEye)\r\n ear = (leftEAR + rightEAR) / 2.0\r\n leftEyeHull = cv2.convexHull(leftEye)\r\n rightEyeHull = cv2.convexHull(rightEye)\r\n # 使用cv2.convexHull获得凸包位置,使用drawContours画出轮廓位置进行画图操作\r\n cv2.drawContours(im_rd, [leftEyeHull], -1, (0, 255, 0), 1)\r\n cv2.drawContours(im_rd, [rightEyeHull], -1, (0, 255, 0), 1)\r\n # 循环,满足条件的,眨眼次数+1\r\n if ear < self.EYE_AR_THRESH: # 眼睛长宽比:0.2\r\n self.COUNTER += 1\r\n\r\n else:\r\n # 如果连续3次都小于阈值,则表示进行了一次眨眼活动\r\n if self.COUNTER >= self.EYE_AR_CONSEC_FRAMES: # 阈值:3\r\n self.TOTAL += 1\r\n self.m_textCtrl3.AppendText(\r\n time.strftime('%Y-%m-%d %H:%M ', time.localtime()) + u\"blink!!!\\n\")\r\n # 重置眼帧计数器\r\n self.COUNTER = 0\r\n # 第十四步:进行画图操作,同时使用cv2.putText将眨眼次数进行显示\r\n cv2.putText(im_rd, \"Faces: {}\".format(len(faces)), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n cv2.putText(im_rd, \"COUNTER: {}\".format(self.COUNTER), (150, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n cv2.putText(im_rd, \"EAR: {:.2f}\".format(ear), (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n cv2.putText(im_rd, \"Blinks: {}\".format(self.TOTAL), (450, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (255, 255, 0), 2)\r\n else:\r\n pass\r\n \"\"\"\r\n 瞌睡点头\r\n \"\"\"\r\n if self.nod_checkBox7.GetValue() == True:\r\n # 获取头部姿态\r\n reprojectdst, euler_angle = self.get_head_pose(shape)\r\n har = euler_angle[0, 0] # 取pitch旋转角度\r\n if har > self.HAR_THRESH: # 点头阈值0.3\r\n self.hCOUNTER += 1\r\n else:\r\n # 如果连续3次都小于阈值,则表示瞌睡点头一次\r\n if self.hCOUNTER >= self.NOD_AR_CONSEC_FRAMES: # 阈值:3\r\n self.hTOTAL += 1\r\n self.m_textCtrl3.AppendText(\r\n time.strftime('%Y-%m-%d %H:%M ', time.localtime()) + u\"sleepy nod!!! 
\\n\")\r\n # 重置点头帧计数器\r\n self.hCOUNTER = 0\r\n # 绘制正方体12轴(视频流尺寸过大时,reprojectdst会超出int范围,建议压缩检测视频尺寸)\r\n # for start, end in self.line_pairs:\r\n # im_rd = im_rd.astype(int)\r\n # print(reprojectdst)[start]\r\n # cv2.line(im_rd, reprojectdst[start], reprojectdst[end], (0, 0, 255))\r\n # 显示角度结果\r\n cv2.putText(im_rd, \"X: \" + \"{:7.2f}\".format(euler_angle[0, 0]), (10, 90),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), thickness=2) # GREEN\r\n cv2.putText(im_rd, \"Y: \" + \"{:7.2f}\".format(euler_angle[1, 0]), (150, 90),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), thickness=2) # BLUE\r\n cv2.putText(im_rd, \"Z: \" + \"{:7.2f}\".format(euler_angle[2, 0]), (300, 90),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), thickness=2) # RED\r\n cv2.putText(im_rd, \"Nod: {}\".format(self.hTOTAL), (450, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (255, 255, 0), 2)\r\n else:\r\n pass\r\n\r\n print('Real-time mouth aspect ratio:{:.2f} '.format(mar) + \"\\tYawn or not:\" + str([False, True][mar > self.MAR_THRESH]))\r\n print('Real-time eye aspect ratio:{:.2f} '.format(ear) + \"\\tBlink or not:\" + str([False, True][self.COUNTER >= 1]))\r\n else:\r\n # 没有检测到人脸\r\n self.oCOUNTER += 1\r\n cv2.putText(im_rd, \"No Face\", (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3, cv2.LINE_AA)\r\n if self.oCOUNTER >= self.OUT_AR_CONSEC_FRAMES_check:\r\n self.m_textCtrl3.AppendText(time.strftime('%Y-%m-%d %H:%M ', time.localtime()) + u\"absence!!!\\n\")\r\n self.oCOUNTER = 0\r\n\r\n # 确定疲劳提示:眨眼50次,打哈欠15次,瞌睡点头30次\r\n time_end = time.time()\r\n timecost = time_end - time_start\r\n print(timecost)\r\n if ((self.TOTAL >= 50 or self.mTOTAL >= 15 or self.hTOTAL >= 30) and (timecost < 200)) or (timecost>14400):\r\n cv2.putText(im_rd, \"SLEEP!!!\", (100, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 3)\r\n\r\n # self.m_textCtrl3.AppendText(u\"疲劳\")\r\n\r\n # opencv中imread的图片内部是BGR排序,wxPython的StaticBitmap需要的图片是RGB排序,不转换会出现颜色变换\r\n height, width = im_rd.shape[:2]\r\n image1 = cv2.cvtColor(im_rd, cv2.COLOR_BGR2RGB)\r\n pic = wx.Bitmap.FromBuffer(width, height, image1)\r\n # 显示图片在panel上:\r\n self.bmp.SetBitmap(pic)\r\n\r\n # 释放摄像头\r\n self.cap.release()\r\n\r\n def update(self,event):\r\n people = os.listdir(path_images_from_camera)\r\n people.sort()\r\n\r\n # with open(\"D:/myworkspace/JupyterNotebook/People/feature/features2_all.csv\", \"w\", newline=\"\") as csvfile:\r\n with open(\"G:/pycharm project/python project/face detecting/1111.csv\", \"w\", newline=\"\") as csvfile:\r\n writer = csv.writer(csvfile)\r\n for person in people:\r\n print(\"##### \" + person + \" #####\")\r\n # Get the mean/average features of face/personX, it will be a list with a length of 128D\r\n features_mean_personX = return_features_mean_personX(path_images_from_camera + person)\r\n writer.writerow(features_mean_personX)\r\n print(\"特征均值 / The mean of features:\", list(features_mean_personX))\r\n print('\\n')\r\n # print(\"所有录入人脸数据存入 / Save all the features of faces registered into: D:/myworkspace/JupyterNotebook/People/feature/features_all2.csv\")\r\n print(\r\n \"所有录入人脸数据存入 / Save all the features of faces registered into: G:/pycharm project/python project/face detecting/1111.csv\")\r\n\r\n # import _thread\r\n # # 创建子线程,按钮调用这个方法,\r\n # _thread.start_new_thread(self._learning_face, (event,))\r\n def upload(self,event):\r\n\r\n dlg = wx.MessageDialog(None, u'Is this your first time using it?', u'Operating hints',wx.YES_NO | wx.ICON_QUESTION)\r\n if (dlg.ShowModal() == wx.ID_YES):\r\n # dlg = wx.TextEntryDialog()\r\n # dlg.Destroy()\r\n 
# dlg = wx.TextEntryDialog(None,u'Please create a folder named \"number\"+\"letter\", for instance:\"1czx')\r\n folder_name = input('Please create a folder named \"number\"+\"letter\", for instance:\"1czx\\n')\r\n path = r'G:\\pycharm project\\python project\\face detecting\\pictures\\people' + '/' +folder_name\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n print('create folder successfully')\r\n else:\r\n print('fail to create the folder,it is exist')\r\n dlg.Destroy() # 取消弹窗\r\n else:\r\n dialog = wx.FileDialog(self, u\"choose photos\", os.getcwd(), '', wildcard=\"(*.jpg)|*.jpg\",\r\n style=wx.FD_OPEN | wx.FD_CHANGE_DIR)\r\n if dialog.ShowModal() == wx.ID_OK:\r\n path = str(dialog.GetPath()) # 更新全局变量路径\r\n filepath = os.path.basename(path)\r\n # print(path,filepath)\r\n folder_name = input('Please choose your folder named \"number\"+\"letter\", for instance:\"1czx\\n')\r\n path1 = r'G:\\pycharm project\\python project\\face detecting\\pictures\\people' + '/' + folder_name + '/' + filepath\r\n if not os.path.exists(path1):\r\n shutil.copyfile(dialog.GetPath(), path1)\r\n print('upload photo successfully')\r\n else:\r\n print('fail to create the folder,it is exist')\r\n\r\n\r\n #os.makedirs(path)\r\n # 选择文件夹对话框窗口\r\n # dialog = wx.FileDialog(self, u\"choose videos\", os.getcwd(), '', wildcard=\"(*.mp4)|*.mp4\",\r\n # style=wx.FD_OPEN | wx.FD_CHANGE_DIR)\r\n # if dialog.ShowModal() == wx.ID_OK:\r\n # # 如果确定了选择的文件夹,将文件夹路径写到m_textCtrl3控件\r\n # # self.m_textCtrl3.SetValue(u\"文件路径:\" + dialog.GetPath() + \"\\n\")\r\n # # self.VIDEO_STREAM = str(dialog.GetPath()) # 更新全局变量路径\r\n\r\n # dialog.Destroy\r\n \"\"\"使用多线程,子线程运行后台的程序,主线程更新前台的UI,这样不会互相影响\"\"\"\r\n # import _thread\r\n # # 创建子线程,按钮调用这个方法,\r\n # _thread.start_new_thread(self._learning_face, (event,))\r\n\r\n\r\n def camera_on(self, event):\r\n \"\"\"使用多线程,子线程运行后台的程序,主线程更新前台的UI,这样不会互相影响\"\"\"\r\n import _thread\r\n # 创建子线程,按钮调用这个方法,\r\n _thread.start_new_thread(self._learning_face, (event,))\r\n\r\n def cameraid_choice(self, event):\r\n # 摄像头编号\r\n cameraid = int(event.GetString()[-1]) # 截取最后一个字符\r\n if cameraid == 0:\r\n self.m_textCtrl3.AppendText(u\"Prepare to open the local camera!!!\\n\")\r\n if cameraid == 1 or cameraid == 2:\r\n self.m_textCtrl3.AppendText(u\"Prepart to open the external camera!!!\\n\")\r\n self.VIDEO_STREAM = cameraid\r\n\r\n def vedio_on(self, event):\r\n if self.CAMERA_STYLE == True: # 释放摄像头资源\r\n # 弹出关闭摄像头提示窗口\r\n dlg = wx.MessageDialog(None, u'Are you sure you want to close it?', u'Operating hints', wx.YES_NO | wx.ICON_QUESTION)\r\n if (dlg.ShowModal() == wx.ID_YES):\r\n self.cap.release() # 释放摄像头\r\n self.bmp.SetBitmap(wx.Bitmap(self.image_cover)) # 封面\r\n dlg.Destroy() # 取消弹窗\r\n # 选择文件夹对话框窗口\r\n dialog = wx.FileDialog(self, u\"choose videos\", os.getcwd(), '', wildcard=\"(*.mp4)|*.mp4\",\r\n style=wx.FD_OPEN | wx.FD_CHANGE_DIR)\r\n if dialog.ShowModal() == wx.ID_OK:\r\n # 如果确定了选择的文件夹,将文件夹路径写到m_textCtrl3控件\r\n self.m_textCtrl3.SetValue(u\"文件路径:\" + dialog.GetPath() + \"\\n\")\r\n self.VIDEO_STREAM = str(dialog.GetPath()) # 更新全局变量路径\r\n dialog.Destroy\r\n \"\"\"使用多线程,子线程运行后台的程序,主线程更新前台的UI,这样不会互相影响\"\"\"\r\n import _thread\r\n # 创建子线程,按钮调用这个方法,\r\n _thread.start_new_thread(self._learning_face, (event,))\r\n\r\n def AR_CONSEC_FRAMES(self, event):\r\n self.m_textCtrl3.AppendText(u\"设置疲劳间隔为:\\t\" + event.GetString() + \"秒\\n\")\r\n self.AR_CONSEC_FRAMES_check = int(event.GetString())\r\n\r\n def OUT_AR_CONSEC_FRAMES(self, event):\r\n self.m_textCtrl3.AppendText(u\"设置脱岗间隔为:\\t\" + event.GetString() + 
\"秒\\n\")\r\n self.OUT_AR_CONSEC_FRAMES_check = int(event.GetString())\r\n\r\n def off(self, event):\r\n \"\"\"关闭摄像头,显示封面页\"\"\"\r\n self.cap.release()\r\n self.bmp.SetBitmap(wx.Bitmap(self.image_cover))\r\n\r\n def OnClose(self, evt):\r\n \"\"\"关闭窗口事件函数\"\"\"\r\n dlg = wx.MessageDialog(None, u'Are you sure you want to close it?', u'Operating hints', wx.YES_NO | wx.ICON_QUESTION)\r\n if (dlg.ShowModal() == wx.ID_YES):\r\n self.Destroy()\r\n print(\"detecting finish\")\r\n\r\n\r\nclass main_app(wx.App):\r\n \"\"\"\r\n 在OnInit() 里边申请Frame类,这样能保证一定是在app后调用,\r\n 这个函数是app执行完自己的__init__函数后就会执行\r\n \"\"\"\r\n\r\n # OnInit 方法在主事件循环开始前被wxPython系统调用,是wxpython独有的\r\n def OnInit(self):\r\n self.frame = Fatigue_detecting(parent=None, title=\"Fatigue Demo\")\r\n self.frame.Show(True)\r\n return True\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = main_app()\r\n app.MainLoop()\r\n\r\n","repo_name":"shalowdream/fatigue-detecting","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":48577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"38911002508","text":"import json\nimport boto3\nimport os\nfrom boto3.dynamodb.conditions import Key, Attr\n\ndynamodb = boto3.resource('dynamodb')\nENVIRONMENT = os.environ['ENVIRONMENT']\n\n\ndef getEvents(event, context):\n global dynamodb\n response_value = {\n 'statusCode': 500,\n 'body': json.dumps({\"error\": \"Internal Error\"}),\n 'headers': {\n 'Content-Type': 'application/json',\n 'Access-Control-Allow-Origin': '*'\n }\n }\n try:\n print(event[\"queryStringParameters\"])\n table = dynamodb.Table('Events_' + ENVIRONMENT)\n if event[\"queryStringParameters\"] is not None:\n if 'EventType' in event[\"queryStringParameters\"]:\n data = table.scan(\n FilterExpression=Attr(\"EventType\").eq(event[\"queryStringParameters\"][\"EventType\"]))\n else:\n data = table.scan()\n\n response_value = {\n 'statusCode': 200,\n 'body': json.dumps(data['Items']),\n 'headers': {\n 'Content-Type': 'application/json',\n 'Access-Control-Allow-Origin': '*'\n }\n }\n except Exception as e:\n print(e)\n\n return response_value\n","repo_name":"TheRealSeat/Capstone","sub_path":"server/lambdas/events/get/getEvents.py","file_name":"getEvents.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"95886992","text":"\"\"\"empty message\n\nRevision ID: ee440e418780\nRevises: \nCreate Date: 2022-09-06 21:03:17.069513\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ee440e418780'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('first_name', sa.String(length=20), nullable=True),\n sa.Column('last_name', sa.String(length=20), nullable=True),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('password', sa.String(), nullable=True),\n sa.Column('created_on', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)\n op.create_index(op.f('ix_user_first_name'), 'user', ['first_name'], unique=False)\n op.create_index(op.f('ix_user_last_name'), 'user', ['last_name'], unique=False)\n op.create_table('toolbox',\n sa.Column('toolbox_id', sa.Integer(), nullable=False),\n sa.Column('userid', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['userid'], ['user.id'], ),\n sa.PrimaryKeyConstraint('toolbox_id')\n )\n op.create_table('tool',\n sa.Column('tool_id', sa.Integer(), nullable=False),\n sa.Column('toolboxid', sa.Integer(), nullable=True),\n sa.Column('tool_name', sa.String(length=20), nullable=True),\n sa.Column('tool_brand', sa.String(length=20), nullable=True),\n sa.Column('quantity', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['toolboxid'], ['toolbox.toolbox_id'], ),\n sa.PrimaryKeyConstraint('tool_id')\n )\n op.create_index(op.f('ix_tool_tool_brand'), 'tool', ['tool_brand'], unique=False)\n op.create_index(op.f('ix_tool_tool_name'), 'tool', ['tool_name'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_tool_tool_name'), table_name='tool')\n op.drop_index(op.f('ix_tool_tool_brand'), table_name='tool')\n op.drop_table('tool')\n op.drop_table('toolbox')\n op.drop_index(op.f('ix_user_last_name'), table_name='user')\n op.drop_index(op.f('ix_user_first_name'), table_name='user')\n op.drop_index(op.f('ix_user_email'), table_name='user')\n op.drop_table('user')\n # ### end Alembic commands ###\n","repo_name":"ag4sm/Toolbox","sub_path":"migrations/versions/ee440e418780_.py","file_name":"ee440e418780_.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"33509536380","text":"# Example of neutron reflectivity dataset refinement from a\n# thick SiO2 layer at the air/Si interface\n# Measurments acquired at D17 instrument (ILL, Grenoble)\nfrom anaklasis import ref\n\nproject='SiO_fit'\nin_file=['D17_SiO.dat']\nunits=['A']\n\nfit_mode=0 # 0 is for linear, 1 is for log\nfit_weight=[1]\nmethod = 'mcmc' # Markov Chain Monte Carlo Sampling\n\nresolution=[-1] # pointwise resolution\n\nmodel = [\n\t# Re_sld Im_sld thk rough solv description\n\t[ 0.0, 0.0, 0, 'p0', 0.0, 'Air'],\n\t[ 'p1', 0.0, 'p2', 'p3', 0.0, 'SiOx'],\n\t[ 2.07e-6, 0.0, 0, 0.0, 0.0, 'Si'],\n\t]\n\nsystem=[model]\npatches=[1.0]\n\nglobal_param = [\n # param min max description type\n ['p0', 0, 20, 'air/SiOx_roughness','uniform'],\n ['p1', 3.3e-6, 3.7e-6, 'SiOx_sld','uniform'],\n ['p2', 0, 2000, 'SiOx_thickness','uniform'],\n ['p3', 0, 30, 'SiOx/Si_roughness','uniform'],\n\t]\n\nmulti_param = []\nconstraints = []\n\nbackground = [\n\t[0.0e-11,1.0e-5,'uniform'],\n\t]\n\nscale = [\n\t[0.8,1.1,'uniform'],\n\t]\n\nres = ref.fit(project, in_file, units, fit_mode, fit_weight,method,resolution,patches, system,\nglobal_param,multi_param, constraints, background,scale,experror=True, plot=True,fast=True)\n","repo_name":"alexandros-koutsioumpas/anaklasis","sub_path":"examples/thick_SiO2_layer_fit.py","file_name":"thick_SiO2_layer_fit.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"8667332583","text":"import time\nimport sys\n#import platform\nimport socket\n\nLATENCY_TIMER = 16\n\nSOCKET_TIMEOUT = .100\n\n\nclass TCPPortHandler(object):\n def __init__(self, port):\n self.is_open = False\n self.packet_timeout = 0.0\n self.tx_time_per_byte = 0.0\n\n self.is_using = False\n self.socket = None\n self.port = port\n\n\n def openPort(self):\n print(\"Opening \",self.port)\n try:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.settimeout(SOCKET_TIMEOUT)\n self.socket.connect(self.port)\n self.is_open = True\n return True\n except Exception as e:\n print (\"Port Open Failed with Exception: %s\"%(e))\n del self.socket\n self.socket = None\n self.is_open = False\n\n def closePort(self):\n if(self.is_open):\n self.socket.close()\n del self.socket\n self.socket = None\n self.is_open = False\n\n def readPort(self, length):\n if(not self.is_open):\n return \"\"\n try:\n return self.socket.recv(length)\n except Exception as ex:\n #print(\"Reading...\")\n #print(ex)\n # self.closePort()\n return \"\"\n\n def writePort(self, packet):\n if(not self.is_open):\n return 0\n try:\n return self.socket.send(bytes(packet))\n except Exception as ex:\n print(ex)\n return 0\n\n def clearPort(self):\n # timeout = self.socket.gettimeout()\n # try:\n # self.socket.settimeout(0)\n # self.socket.recv(1024)\n # finally:\n # self.socket.settimeout(timeout)\n pass\n\n def setPacketTimeout(self, packet_length):\n self.packet_start_time = self.getCurrentTime()\n self.packet_timeout = (self.tx_time_per_byte * packet_length) + (LATENCY_TIMER * 2.0) + 2.0\n\n def setPacketTimeoutMillis(self, msec):\n self.packet_start_time = self.getCurrentTime()\n self.packet_timeout = msec\n\n def isPacketTimeout(self):\n if self.getTimeSinceStart() > self.packet_timeout:\n self.packet_timeout = 0\n return True\n\n return False\n\n def getCurrentTime(self):\n return round(time.time() * 1000000000) / 1000000.0\n\n def getTimeSinceStart(self):\n time_since = self.getCurrentTime() - self.packet_start_time\n if time_since < 0.0:\n self.packet_start_time = self.getCurrentTime()\n\n return time_since\n ","repo_name":"Lordy2001/ServoController","sub_path":"dynamixel_sdk/tcp_port_handler.py","file_name":"tcp_port_handler.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"27560300030","text":"\n\nimport sqlite3\nconnection = sqlite3.connect(\"Artikel.db\")\n\ncursor = connection.cursor()\n\n\nfor row in cursor.execute('SELECT * FROM Artikel'):\n print(row)\n\n# Artikeln anzeigen\nartikeln = [\n {'id': 1, 'artikel-nr': 10010, 'name':'Pullover', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n {'id': 2, 'artikel-nr': 10020, 'name':'Hose', 'description':'das ist die beschreibung des Artikels','price':50.99,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n {'id': 3, 'artikel-nr': 10030, 'name':'T-Shirt', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n {'id': 4, 'artikel-nr': 10040, 'name':'Schale', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n {'id': 5, 'artikel-nr': 10050, 'name':'Mütze', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n {'id': 6, 'artikel-nr': 10060, 'name':'Unterhose', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n {'id': 7, 'artikel-nr': 10070, 'name':'Soken', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n]\n\n#Wahrenkorb\nkorb = [\n {'id': 1, 'count':3},\n]\n\n#print(artikeln)\n\n\n\n\ndef ArtileIndex():\n #Artikel anzeigen\n print('alle Artikeln anzeigen')\n # gib mir alle daten aus der Datenbank\n for value in artikeln:\n print(artikeln[value]['name'], 'Prise: ', artikeln[value]['price'])\n\ndef ArtikelStore():\n # Artikel store\n artikel = {\n 'id': 8,\n 'artikel-nr': 10080,\n 'name':'Rosa Lutscher',\n 'description':'das ist die beschreibung des Artikels',\n 'price':20.55,\n 'status':True,\n 'lagerbestand':100,\n 'created_at': '2021-11-19 16:09:10',\n 'updated_at': '',\n }\n artikeln.append(artikel)\n\n print(artikeln)\n\n print('Artikeln eingabe speichern')\n\n#funktion aufruf der store methode\n#ArtikelStore()\n\n\ndef ArtikelEdit(id):\n # Artikel edit\n print('Artikeln bearbeiten')\n for item in artikeln:\n #print(item['id'])\n if item['id'] == id:\n print(item)\n # frontend übergabe\n\n\n# funktion methode aufrufen\n#ArtikelEdit(5)\n\n\ndef ArtikelUpdate(id,request):\n # Artikel update\n #print(request['name'])\n print('Artikeln update')\n for item in artikeln:\n #print(item['id'])\n if item['id'] == id:\n print('Vorher: ',item)\n item['name'] = request['name']\n item['description'] = request['description']\n item['price'] = request['price']\n item['lagerbestand'] = request['lagerbestand']\n\n print('Nacher: ',item)\n\n\n\n# update methode\nupdate_variable = {\n 'name':'Rote Hose',\n 'description':'ich bin der Test mit der Roten Hose',\n 'price':99.50,\n 'lagerbestand':50\n}\n\n#ArtikelUpdate(3, update_variable)\n\n\ndef ArtikelDelete(id):\n # Artikel delete\n print('Artikeln löschen')\n print(artikeln)\n for index,item in enumerate(artikeln):\n #print(item['id'])\n if item['id'] == id:\n artikeln.pop(index)\n print(index)\n\n print(repr(artikeln))\n\n#ArtikelDelete(7)\n\ndef warenKorb(id):\n # wahrenkorb erweitern\n # um 
einen Artikel mit der ID\n for item in artikeln:\n if item['id'] == id:\n korb.append(item)\n print(\"die artikel now gewählt\" ,korb)\n#funktions aufruf\n#warenKorb(7)\n\n\ndef artikelstand():\n print(len(korb))\n if len(korb) > 0:\n print('korb ist nicht leer')\n else:\n print('korb ist leer')\n\n\n#functions aufruf\nartikelstand()\n\n\n\n#frontend\ndef allArticls():\n for value in artikeln:\n print(value['name'],value['id'],)\n\nallArticls()","repo_name":"shivaparto/PythonOnlineShop","sub_path":"DataLayer/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
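The module opens Artikel.db at the top but every create/update/delete above mutates only the in-memory artikeln list. A hedged sketch of pushing an update into SQLite with a parameterized statement; the column names are assumed to mirror the dict keys used above:

import sqlite3

def artikel_update_db(artikel_id, name, price):
    connection = sqlite3.connect('Artikel.db')
    try:
        # Parameterized query: never interpolate user input into SQL.
        connection.execute(
            'UPDATE Artikel SET name = ?, price = ? WHERE id = ?',
            (name, price, artikel_id))
        connection.commit()
    finally:
        connection.close()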
+{"seq_id":"33173966878","text":"#!/usr/bin/env python3\n# Advent of Code 2022: Day 2 (part 2)\n# Submission by qxtal \n\nf = open('./input.txt', 'r')\ngame_input = f.readlines()\nf.close()\n\n# This can definitely be done in a better way, but it works.\nlookup = {\n \"X\": { # Player: Needs to LOSE\n \"A\": 3 + 0, # Opponent: Rock - Player pick: Scissor\n \"B\": 1 + 0, # Opponent: Paper - Player pick: Rock\n \"C\": 2 + 0 # Opponent: Scissor - Player pick: Paper\n },\n \"Y\": { # Player: Needs to TIE\n \"A\": 1 + 3, # Opponent: Rock - Player pick: Rock\n \"B\": 2 + 3, # Opponent: Paper - Player pick: Paper\n \"C\": 3 + 3 # Opponent: Scissor - Player pick: Scissor\n },\n \"Z\": { # Player: Needs to WIN\n \"A\": 2 + 6, # Opponent: Rock - Player pick: Paper\n \"B\": 3 + 6, # Opponent: Paper - Player pick: Scissor\n \"C\": 1 + 6 # Opponent: Scissor - Player pick: Rock\n }\n}\n\nscore = 0\n\nfor line in game_input:\n opponent = line.split()[0]\n player = line.split()[1]\n score += lookup[player][opponent]\n\nprint(score)","repo_name":"qxtal/advent-of-code","sub_path":"2022/02/aoc-02b.py","file_name":"aoc-02b.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"992302676","text":"def menu():\n\tprint(\"-\"*20)\n\tprint(\"Menu: \")\n\tprint(\"1. Encode\")\n\tprint(\"2. Decode\")\n\tprint(\"-\"*20)\n\n\tchoice = int(input(\"Enter your choice: \"))\n\n\twhile choice not in (1, 2):\n\t\tchoice = int(input(\"Invalid, pls re-enter your choice: \"))\n\n\treturn choice\n\n\n\ndef cipher(char, n):\n\tnew_ascii = ord(char) + n\n\n\tif (new_ascii not in range(97, 122+1)) and (new_ascii not in range(65, 90+1)):\n\n\t\tif n > 0:\n\t\t\tnew_ascii = new_ascii - 26\n\t\t\t\n\t\telse:\n\t\t\tnew_ascii = new_ascii + 26\n\n\treturn chr(new_ascii)\n\n\n\ndef encode():\n\tstring = input(\"Msg to be encoded: \")\n\tn = int(input(\"Caesar value: \"))\n\n\tencoded_str = \"\"\n\n\tfor char in string:\n\t\tif char.isalpha():\n\t\t\tencoded_str += cipher(char, n)\n\n\t\telse:\n\t\t\tencoded_str += char\n\t\t\n\tprint(encoded_str)\n\n\n\ndef decode():\n\tstring = input(\"Msg to be decoded: \")\n\tn = int(input(\"Caesar value: \"))*(-1)\n\n\tdecoded = \"\"\n\n\tfor char in string:\n\t\tif char.isalpha():\n\t\t\tdecoded += cipher(char, n)\n\n\t\telse:\n\t\t\tdecoded += char\n\t\t\n\tprint(decoded)\n\n\n\nif __name__ == \"__main__\":\n\tchoice = menu()\n\n\tif choice == 1:\n\t\tencode()\n\telse:\n\t\tdecode()\n\n","repo_name":"howtoosee/Mini_Projects","sub_path":"caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"16946103406","text":"phs = ['AP',\n 'SP',\n 'a',\n 'ai',\n 'an',\n 'ang',\n 'ao',\n 'b',\n 'c',\n 'ch',\n 'd',\n 'e',\n 'ei',\n 'en',\n 'eng',\n 'er',\n 'f',\n 'g',\n 'h',\n 'i',\n 'ia',\n 'ian',\n 'iang',\n 'iao',\n 'ie',\n 'in',\n 'ing',\n 'iong',\n 'iu',\n 'j',\n 'k',\n 'l',\n 'm',\n 'n',\n 'o',\n 'ong',\n 'ou',\n 'p',\n 'q',\n 'r',\n 's',\n 'sh',\n 't',\n 'u',\n 'ua',\n 'uai',\n 'uan',\n 'uang',\n 'ui',\n 'un',\n 'uo',\n 'v',\n 'van',\n 've',\n 'vn',\n 'w',\n 'x',\n 'y',\n 'z',\n 'zh']\n\ndef get_initials_and_finals():\n initials = []\n finals = []\n for ph in phs:\n if ph in ('AP', 'SP'):\n continue\n elif ph[0] in ('a', 'e', 'i', 'o', 'u', 'v'):\n finals.append(ph)\n else:\n initials.append(ph)\n return initials, finals","repo_name":"OmniAiOrg/SonicScribe","sub_path":"utils/ph.py","file_name":"ph.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"tg","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"1619578317","text":"from django.conf.urls import url\nfrom . import views\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='index' ),\n url(r'^apiv1/forecasts', views.ForecastList.as_view()),\n url(r'^register', views.UserFormView.as_view(), name='register' ),\n url(r'^login_user/$', views.login_user, name='login_user'),\n url(r'^logout_user/$', views.logout_user, name='logout_user'),\n]","repo_name":"abrophy/weather-app","sub_path":"weather/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"15527112422","text":"\"\"\"\nUtilitary functions for displaying informations in the terminal\n\"\"\"\n\n# Standard modules\nimport os\nimport sys\nimport time\nimport operator\nimport functools\n# External modules\nimport torch\nfrom torch.nn.modules.module import _addindent\nimport torchinfo.torchinfo as torchinfo\n\ntry:\n _, term_width = os.popen('stty size', 'r').read().split()\n term_width = int(term_width)\nexcept ValueError:\n term_width = 80\n\nTOTAL_BAR_LENGTH = 65.\nlast_time = time.time()\nbegin_time = last_time\ndef progress_bar(current, total, msg=None):\n global last_time, begin_time\n if current == 0:\n begin_time = time.time() # Reset for new bar.\n\n cur_len = int(TOTAL_BAR_LENGTH*current/total)\n rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1\n\n sys.stdout.write(' [{}>{}]'.format('='*cur_len, '.'*rest_len))\n\n cur_time = time.time()\n step_time = cur_time - last_time\n last_time = cur_time\n tot_time = cur_time - begin_time\n\n L = []\n L.append(' Step: %10s' % format_time(step_time))\n L.append(' | Tot: %10s' % format_time(tot_time))\n if msg:\n L.append(' | ' + msg)\n\n msg = ''.join(L)\n sys.stdout.write(msg)\n for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):\n sys.stdout.write(' ')\n\n # Go back to the center of the bar.\n for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):\n sys.stdout.write('\\b')\n sys.stdout.write(' %d/%d ' % (current+1, total))\n\n if current < total-1:\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\ndef format_time(seconds):\n days = int(seconds / 3600/24)\n seconds = seconds - days*3600*24\n hours = int(seconds / 3600)\n seconds = seconds - hours*3600\n minutes = int(seconds / 60)\n seconds = seconds - minutes*60\n secondsf = int(seconds)\n seconds = seconds - secondsf\n millis = int(seconds*1000)\n\n f = ''\n i = 1\n if days > 0:\n f += str(days) + 'D'\n i += 1\n if hours > 0 and i <= 2:\n f += str(hours) + 'h'\n i += 1\n if minutes > 0 and i <= 2:\n f += str(minutes) + 'm'\n i += 1\n if secondsf > 0 and i <= 2:\n f += str(secondsf) + 's'\n i += 1\n if millis > 0 and i <= 2:\n f += str(millis) + 'ms'\n i += 1\n if f == '':\n f = '0ms'\n return f\n\n\ndef torch_summarize(model, input_size=None):\n \"\"\"Summarizes torch model by showing trainable parameters and weights.\"\"\"\n return torchinfo.summary(model, verbose=0, input_size=input_size)\n\ndef htmlize(txt):\n return txt.replace(' ', ' ').replace('\\n', ' \\n')\n","repo_name":"jeremyfix/deepcs","sub_path":"deepcs/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"71249237683","text":"import six\nimport inspect\nimport warnings\nfrom functools import wraps\nfrom itertools import product\nfrom AccessControl.SecurityManagement import getSecurityManager\nfrom AccessControl.SecurityManagement import setSecurityManager\nfrom AccessControl.SecurityManagement import newSecurityManager\nfrom Acquisition import aq_base, aq_parent\nfrom Products.ERP5Type.tests.utils import LogInterceptor\nfrom Testing import ZopeTestCase\nfrom Products.ERP5Type.tests.ERP5TypeTestCase import ERP5TypeTestCase\nfrom Products.ERP5Type.tests.utils import createZODBPythonScript\nfrom Products.ERP5Type.Base import Base\nfrom Products.CMFActivity import ActivityTool\nfrom Products.CMFActivity.Activity.SQLBase import INVOKE_ERROR_STATE\nfrom Products.CMFActivity.Activity.Queue import VALIDATION_ERROR_DELAY\nfrom Products.CMFActivity.Activity.SQLDict import SQLDict\nfrom Products.CMFActivity.Errors import ActivityPendingError, ActivityFlushError\nfrom Products.PluggableAuthService.PropertiedUser import PropertiedUser\nfrom erp5.portal_type import Organisation\nfrom AccessControl.SecurityManagement import newSecurityManager\nfrom zLOG import LOG\nfrom ZODB.POSException import ConflictError\nfrom DateTime import DateTime\nfrom Products.CMFActivity.ActivityTool import (\n cancelProcessShutdown, Message, getCurrentNode, getServerAddress)\nfrom MySQLdb import OperationalError\nfrom Products.ZMySQLDA.db import DB\nimport gc\nimport random\nimport threading\nimport weakref\nimport transaction\nfrom App.config import getConfiguration\nimport socket\n\nclass CommitFailed(Exception):\n pass\n\ndef for_each_activity(wrapped):\n def wrapper(self):\n getMessageList = self.portal.portal_activities.getMessageList\n for activity in ActivityTool.activity_dict:\n wrapped(self, activity)\n self.abort()\n self.assertFalse([\n x.__dict__ for x in getMessageList()\n ])\n return wraps(wrapped)(wrapper)\n\ndef registerFailingTransactionManager(*args, **kw):\n from Shared.DC.ZRDB.TM import TM\n class dummy_tm(TM):\n def tpc_vote(self, *ignored):\n raise CommitFailed\n def _finish(self):\n pass\n def _abort(self):\n pass\n dummy_tm()._register()\n\nclass LockOnce(object):\n\n def __init__(self):\n self.acquire = threading.Lock().acquire\n\n def release(self):\n pass\n\nclass TestCMFActivity(ERP5TypeTestCase, LogInterceptor):\n\n # Different variables used for this test\n company_id = 'Nexedi'\n title1 = 'title1'\n title2 = 'title2'\n company_id2 = 'Coramy'\n company_id3 = 'toto'\n\n def getTitle(self):\n return \"CMFActivity\"\n\n def getBusinessTemplateList(self):\n \"\"\"\n Return the list of business templates.\n \"\"\"\n return ('erp5_base', 'erp5_joblib')\n\n def getOrganisationModule(self):\n return self.portal.organisation_module\n\n def getOrganisation(self):\n return self.getOrganisationModule()._getOb(self.company_id)\n\n def afterSetUp(self):\n super(TestCMFActivity, self).afterSetUp()\n from Products.CMFActivity.ActivityRuntimeEnvironment import BaseMessage\n # Set 'max_retry' to a known value so that we can test the feature\n BaseMessage.max_retry = property(lambda self:\n self.activity_kw.get('max_retry', 5))\n self.login()\n # Then add new components\n organisation_module = self.getOrganisationModule()\n if not(organisation_module.hasContent(self.company_id)):\n o1 = organisation_module.newContent(id=self.company_id)\n self.tic()\n\n def tearDown(self):\n # Override ERP5 tearDown to make sure that tests do not leave unprocessed\n # activity messages. 
We are testing CMFActivity so it's important to check\n # that everything works as expected on this subject.\n try:\n if self._resultForDoCleanups.wasSuccessful():\n getMessageList = self.portal.portal_activities.getMessageList\n self.assertFalse(getMessageList())\n # Also check if a test drop them without committing.\n self.abort()\n self.assertFalse(getMessageList())\n finally:\n ERP5TypeTestCase.tearDown(self)\n\n def getMessageList(self, activity, **kw):\n return ActivityTool.activity_dict[activity].getMessageList(\n self.portal.portal_activities, **kw)\n\n def deleteMessageList(self, activity, message_list):\n ActivityTool.activity_dict[activity].deleteMessageList(\n self.portal.portal_activities.getSQLConnection(),\n [m.uid for m in message_list])\n self.commit()\n\n def login(self):\n uf = self.portal.acl_users\n uf._doAddUser('seb', '', ['Manager'], [])\n uf._doAddUser('ERP5TypeTestCase', '', ['Manager'], [])\n user = uf.getUserById('seb').__of__(uf)\n newSecurityManager(None, user)\n\n def ticOnce(self, *args, **kw):\n is_running_lock = ActivityTool.is_running_lock\n try:\n ActivityTool.is_running_lock = LockOnce()\n self.portal.portal_activities.tic(*args, **kw)\n finally:\n ActivityTool.is_running_lock = is_running_lock\n\n @for_each_activity\n def testInvokeAndCancelActivity(self, activity):\n \"\"\"\n Simple test where we invoke and cancel an activity\n \"\"\"\n activity_tool = self.portal.portal_activities\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n self.assertEqual(self.title1,organisation.getTitle())\n organisation.activate(activity=activity)._setTitle(self.title2)\n # Needed so that the message are commited into the queue\n self.commit()\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),1)\n activity_tool.manageCancel(organisation.getPhysicalPath(),'_setTitle')\n # Needed so that the message are removed from the queue\n self.commit()\n self.assertEqual(self.title1,organisation.getTitle())\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),0)\n organisation.activate(activity=activity)._setTitle(self.title2)\n # Needed so that the message are commited into the queue\n self.commit()\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),1)\n activity_tool.manageInvoke(organisation.getPhysicalPath(),'_setTitle')\n # Needed so that the message are removed from the queue\n self.commit()\n self.assertEqual(self.title2,organisation.getTitle())\n\n @for_each_activity\n def testDeferredSetTitleActivity(self, activity):\n \"\"\"\n We check that the title is changed only after that\n the activity was called\n \"\"\"\n activity_tool = self.portal.portal_activities\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n self.assertEqual(self.title1,organisation.getTitle())\n organisation.activate(activity=activity)._setTitle(self.title2)\n # Needed so that the message are commited into the queue\n self.commit()\n self.assertEqual(self.title1,organisation.getTitle())\n activity_tool.tic()\n self.assertEqual(self.title2,organisation.getTitle())\n\n @for_each_activity\n def testCallOnceWithActivity(self, activity):\n \"\"\"\n With this test we can check if methods are called\n only once (sometimes it was twice !!!)\n \"\"\"\n activity_tool = self.portal.portal_activities\n def setFoobar(self):\n if hasattr(self,'foobar'):\n self.foobar = self.foobar + 1\n else:\n self.foobar = 1\n def getFoobar(self):\n return 
(getattr(self,'foobar',0))\n organisation = self.getOrganisation()\n Organisation.setFoobar = setFoobar\n Organisation.getFoobar = getFoobar\n organisation.foobar = 0\n organisation._setTitle(self.title1)\n self.assertEqual(0,organisation.getFoobar())\n organisation.activate(activity=activity).setFoobar()\n # Needed so that the message are commited into the queue\n self.commit()\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),1)\n activity_tool.tic()\n self.assertEqual(1,organisation.getFoobar())\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),0)\n organisation.activate(activity=activity).setFoobar()\n # Needed so that the message are commited into the queue\n self.commit()\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),1)\n activity_tool.manageInvoke(organisation.getPhysicalPath(),'setFoobar')\n # Needed so that the message are commited into the queue\n self.commit()\n self.assertEqual(2,organisation.getFoobar())\n\n @for_each_activity\n def testTryFlushActivity(self, activity):\n \"\"\"\n Check the method flush\n \"\"\"\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n organisation.activate(activity=activity)._setTitle(self.title2)\n organisation.flushActivity(invoke=1)\n self.assertEqual(organisation.getTitle(),self.title2)\n self.commit()\n message_list = self.portal.portal_activities.getMessageList()\n self.assertEqual(len(message_list),0)\n self.assertEqual(organisation.getTitle(),self.title2)\n # Try again with different commit order\n organisation._setTitle(self.title1)\n organisation.activate(activity=activity)._setTitle(self.title2)\n self.commit()\n organisation.flushActivity(invoke=1)\n self.assertEqual(len(message_list),0)\n self.assertEqual(organisation.getTitle(),self.title2)\n self.commit()\n\n @for_each_activity\n def testTryActivateInsideFlush(self, activity):\n \"\"\"\n Create a new activity inside a flush action\n \"\"\"\n activity_tool = self.portal.portal_activities\n def DeferredSetTitle(self,value):\n self.activate(activity=activity)._setTitle(value)\n Organisation.DeferredSetTitle = DeferredSetTitle\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n organisation.activate(activity=activity).DeferredSetTitle(self.title2)\n organisation.flushActivity(invoke=1)\n self.commit()\n activity_tool.tic()\n self.commit()\n self.assertEqual(organisation.getTitle(),self.title2)\n\n @for_each_activity\n def testTryTwoMethods(self, activity):\n \"\"\"\n Try several activities\n \"\"\"\n activity_tool = self.portal.portal_activities\n def DeferredSetDescription(self,value):\n self._setDescription(value)\n def DeferredSetTitle(self,value):\n self._setTitle(value)\n Organisation.DeferredSetTitle = DeferredSetTitle\n Organisation.DeferredSetDescription = DeferredSetDescription\n organisation = self.getOrganisation()\n organisation._setTitle(None)\n organisation.setDescription(None)\n organisation.activate(activity=activity).DeferredSetTitle(self.title1)\n organisation.activate(activity=activity).DeferredSetDescription(self.title1)\n self.commit()\n activity_tool.distribute()\n activity_tool.tic()\n self.commit()\n self.assertEqual(organisation.getTitle(),self.title1)\n self.assertEqual(organisation.getDescription(),self.title1)\n self.tic()\n\n @for_each_activity\n def testTryTwoMethodsAndFlushThem(self, activity):\n \"\"\"\n make sure flush works with several activities\n \"\"\"\n activity_tool = 
self.portal.portal_activities\n def DeferredSetTitle(self,value):\n self.activate(activity=activity)._setTitle(value)\n def DeferredSetDescription(self,value):\n self.activate(activity=activity)._setDescription(value)\n Organisation.DeferredSetTitle = DeferredSetTitle\n Organisation.DeferredSetDescription = DeferredSetDescription\n organisation = self.getOrganisation()\n organisation._setTitle(None)\n organisation.setDescription(None)\n organisation.activate(activity=activity).DeferredSetTitle(self.title1)\n organisation.activate(activity=activity).DeferredSetDescription(self.title1)\n organisation.flushActivity(invoke=1)\n self.commit()\n activity_tool.distribute()\n activity_tool.tic()\n self.commit()\n self.assertEqual(organisation.getTitle(),self.title1)\n self.assertEqual(organisation.getDescription(),self.title1)\n\n def TryActivateFlushActivateTic(self, activity,second=None,commit_sub=0):\n \"\"\"\n try to commit sub transactions\n \"\"\"\n activity_tool = self.portal.portal_activities\n def DeferredSetTitle(self,value,commit_sub=0):\n if commit_sub:\n transaction.savepoint(optimistic=True)\n self.activate(activity=second or activity,priority=4)._setTitle(value)\n def DeferredSetDescription(self,value,commit_sub=0):\n if commit_sub:\n transaction.savepoint(optimistic=True)\n self.activate(activity=second or activity,priority=4)._setDescription(value)\n Organisation.DeferredSetTitle = DeferredSetTitle\n Organisation.DeferredSetDescription = DeferredSetDescription\n organisation = self.getOrganisation()\n organisation._setTitle(None)\n organisation.setDescription(None)\n organisation.activate(activity=activity).DeferredSetTitle(self.title1,commit_sub=commit_sub)\n organisation.flushActivity(invoke=1)\n organisation.activate(activity=activity).DeferredSetDescription(self.title1,commit_sub=commit_sub)\n self.commit()\n activity_tool.distribute()\n activity_tool.tic()\n self.commit()\n self.assertEqual(organisation.getTitle(),self.title1)\n self.assertEqual(organisation.getDescription(),self.title1)\n\n @for_each_activity\n def testTryMessageWithErrorOnActivity(self, activity):\n \"\"\"\n Make sure that message with errors are not deleted\n \"\"\"\n activity_tool = self.portal.portal_activities\n def crashThisActivity(self):\n self.IWillCrash()\n organisation = self.getOrganisation()\n Organisation.crashThisActivity = crashThisActivity\n organisation.activate(activity=activity).crashThisActivity()\n # Needed so that the message are commited into the queue\n self.commit()\n message_list = activity_tool.getMessageList()\n LOG('Before MessageWithErrorOnActivityFails, message_list',0,[x.__dict__ for x in message_list])\n self.assertEqual(len(message_list),1)\n activity_tool.tic()\n # XXX HERE WE SHOULD USE TIME SHIFT IN ORDER TO SIMULATE MULTIPLE TICS\n # Test if there is still the message after it crashed\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),1)\n activity_tool.manageCancel(organisation.getPhysicalPath(),'crashThisActivity')\n # Needed so that the message are commited into the queue\n self.commit()\n\n @for_each_activity\n def testDeferredSetTitleWithRenamedObject(self, activity):\n \"\"\"\n make sure that it is impossible to rename an object\n if some activities are still waiting for this object\n \"\"\"\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n self.assertEqual(self.title1,organisation.getTitle())\n organisation.activate(activity=activity)._setTitle(self.title2)\n # Needed so that the message are commited 
into the queue\n self.commit()\n self.assertEqual(self.title1,organisation.getTitle())\n self.assertRaises(ActivityPendingError,organisation.edit,id=self.company_id2)\n self.portal.portal_activities.tic()\n\n def TryActiveProcess(self, activity):\n \"\"\"\n Try to store the result inside an active process\n \"\"\"\n activity_tool = self.portal.portal_activities\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n active_process = activity_tool.newActiveProcess()\n self.assertEqual(self.title1,organisation.getTitle())\n organisation.activate(activity=activity,active_process=active_process).getTitle()\n # Needed so that the message are commited into the queue\n self.commit()\n activity_tool.distribute()\n activity_tool.tic()\n self.assertEqual(self.title1,organisation.getTitle())\n result = active_process.getResultList()[0]\n self.assertEqual(result.method_id , 'getTitle')\n self.assertEqual(result.result , self.title1)\n # Execute any further activity which may have been spawned by activity\n # execution (ex: fulltext indeation of the active process).\n self.tic()\n\n def TryActiveProcessWithResultDict(self, activity):\n \"\"\"\n Try to store the result inside an active process using result list\n \"\"\"\n activity_tool = self.portal.portal_activities\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n active_process = activity_tool.newActiveProcess()\n self.assertEqual(self.title1,organisation.getTitle())\n\n # Post SQLjoblib tasks with explicit signature\n organisation.activate(activity=activity,active_process=active_process, signature=1).getTitle()\n organisation.activate(activity=activity,active_process=active_process, signature=2).getTitle()\n organisation.activate(activity=activity,active_process=active_process, signature=3).getTitle()\n\n self.commit()\n activity_tool.distribute()\n activity_tool.tic()\n result_dict = active_process.getResultDict()\n result = result_dict[1]\n self.assertEqual(result_dict[1].method_id, 'getTitle')\n self.assertEqual(result.result , self.title1)\n result = result_dict[2]\n self.assertEqual(result_dict[2].method_id, 'getTitle')\n self.assertEqual(result.result , self.title1)\n result = result_dict[3]\n self.assertEqual(result_dict[3].method_id, 'getTitle')\n self.assertEqual(result.result , self.title1)\n # Execute any further activity which may have been spawned by activity\n # execution (ex: fulltext indeation of the active process).\n self.tic()\n\n @for_each_activity\n def testTryMethodAfterMethod(self, activity):\n \"\"\"\n Ensure the order of an execution by a method id\n \"\"\"\n o = self.getOrganisation()\n\n o.setTitle('a')\n self.assertEqual(o.getTitle(), 'a')\n self.tic()\n\n def toto(self, value):\n self.setTitle(self.getTitle() + value)\n o.__class__.toto = toto\n\n def titi(self, value):\n self.setTitle(self.getTitle() + value)\n o.__class__.titi = titi\n\n o.activate(after_method_id = 'titi', activity = activity).toto('b')\n o.activate(activity = activity).titi('c')\n self.tic()\n self.assertEqual(o.getTitle(), 'acb')\n\n @for_each_activity\n def testTryAfterTag(self, activity):\n \"\"\"\n Ensure the order of an execution by a tag\n \"\"\"\n o = self.getOrganisation()\n\n o.setTitle('?')\n self.assertEqual(o.getTitle(), '?')\n self.tic()\n\n o.activate(after_tag = 'toto', activity = activity).setTitle('b')\n o.activate(tag = 'toto', activity = activity).setTitle('a')\n self.tic()\n self.assertEqual(o.getTitle(), 'b')\n\n o.setDefaultActivateParameterDict({'tag': 'toto'})\n def 
titi(self):\n self.setCorporateName(self.getTitle() + 'd')\n o.__class__.titi = titi\n o.activate(after_tag_and_method_id=('toto', 'setTitle'), activity = activity).titi()\n o.activate(activity = activity).setTitle('c')\n self.tic()\n self.assertEqual(o.getCorporateName(), 'cd')\n\n @for_each_activity\n def testTryFlushActivityWithAfterTag(self, activity):\n \"\"\"\n Ensure the order of an execution by a tag\n \"\"\"\n o = self.getOrganisation()\n\n o.setTitle('?')\n o.setDescription('?')\n self.assertEqual(o.getTitle(), '?')\n self.assertEqual(o.getDescription(), '?')\n self.tic()\n\n o.activate(after_tag = 'toto', activity = activity).setDescription('b')\n o.activate(tag = 'toto', activity = activity).setTitle('a')\n self.commit()\n tool = self.getActivityTool()\n self.assertRaises(ActivityFlushError,tool.manageInvoke,o.getPath(),'setDescription')\n tool.manageInvoke(o.getPath(),'setTitle')\n self.commit()\n self.assertEqual(o.getTitle(), 'a')\n self.assertEqual(o.getDescription(), '?')\n self.tic()\n self.assertEqual(o.getTitle(), 'a')\n self.assertEqual(o.getDescription(), 'b')\n\n @for_each_activity\n def testScheduling(self, activity):\n \"\"\"\n Check if active objects with different after parameters are executed in a correct order\n \"\"\"\n o = self.getOrganisation()\n\n o.setTitle('?')\n self.assertEqual(o.getTitle(), '?')\n self.tic()\n\n def toto(self, s):\n self.setTitle(self.getTitle() + s)\n o.__class__.toto = toto\n\n o.activate(tag = 'toto', activity = activity).toto('a')\n self.commit()\n o.activate(after_tag = 'titi', activity = activity).toto('b')\n self.commit()\n o.activate(tag = 'titi', after_tag = 'toto', activity = activity).setTitle('c')\n self.tic()\n self.assertEqual(o.getTitle(), 'cb')\n\n @for_each_activity\n def testSchedulingAfterTagList(self, activity):\n \"\"\"\n Check if active objects with different after parameters are executed in a\n correct order, when after_tag is passed as a list\n \"\"\"\n o = self.getOrganisation()\n\n o.setTitle('')\n self.tic()\n\n def toto(self, s):\n self.setTitle(self.getTitle() + s)\n o.__class__.toto = toto\n\n o.activate(tag='A', activity=activity).toto('a')\n self.commit()\n o.activate(tag='B', activity=activity).toto('b')\n self.commit()\n o.activate(after_tag=('A', 'B'), activity=activity).setTitle('last')\n self.tic()\n self.assertEqual(o.getTitle(), 'last')\n\n @for_each_activity\n def testCheckCountMessageWithTag(self, activity):\n \"\"\"\n Check countMessageWithTag function.\n \"\"\"\n activity_tool = self.portal.portal_activities\n o = self.getOrganisation()\n o.setTitle('?')\n self.tic()\n\n o.activate(tag = 'toto', activity = activity).setTitle('a')\n self.commit()\n self.assertEqual(o.getTitle(), '?')\n self.assertEqual(activity_tool.countMessageWithTag('toto'), 1)\n self.tic()\n self.assertEqual(o.getTitle(), 'a')\n self.assertEqual(activity_tool.countMessageWithTag('toto'), 0)\n\n def testTryErrorsWhileFinishingCommitDB(self):\n \"\"\"Try to execute active objects which may throw conflict errors\n while validating, and check if they are still executed.\"\"\"\n activity_tool = self.portal.portal_activities\n\n # Monkey patch Queue to induce conflict errors artificially.\n def query(self, query_string,*args, **kw):\n # Not so nice, this is specific to zsql method\n if \"REPLACE INTO\" in query_string:\n raise OperationalError\n return self.original_query(query_string,*args, **kw)\n\n # Test some range of conflict error occurences.\n self.portal.organisation_module.reindexObject()\n self.commit()\n message, = 
activity_tool.getMessageList()\n try:\n DB.original_query = DB.query\n DB.query = query\n activity_tool.distribute()\n activity_tool.tic()\n self.commit()\n finally:\n DB.query = DB.original_query\n del DB.original_query\n self.deleteMessageList('SQLDict', [message])\n\n @for_each_activity\n def testIsMessageRegisteredMethod(self, activity):\n dedup = activity != 'SQLQueue'\n activity_tool = self.portal.portal_activities\n object_b = self.getOrganisation()\n object_a = object_b.getParentValue()\n def check(count):\n self.commit()\n self.assertEqual(len(activity_tool.getMessageList()), count)\n self.tic()\n # First case: creating the same activity twice must only register one\n # for queues with deduplication.\n object_a.activate(activity=activity).getId()\n object_a.activate(activity=activity).getId()\n check(1 if dedup else 2)\n # Second case: creating activity with same tag must only register one,\n # for queues with deduplication.\n # This behaviour is actually the same as the no-tag behaviour.\n object_a.activate(activity=activity, tag='foo').getId()\n object_a.activate(activity=activity, tag='foo').getId()\n check(1 if dedup else 2)\n # Third case: creating activities with different tags must register both.\n object_a.activate(activity=activity, tag='foo').getId()\n object_a.activate(activity=activity, tag='bar').getId()\n check(2)\n # Fourth case: creating activities on different objects must register\n # both.\n object_a.activate(activity=activity).getId()\n object_b.activate(activity=activity).getId()\n check(2)\n # Fifth case: creating activities with different method must register\n # both.\n object_a.activate(activity=activity).getId()\n object_a.activate(activity=activity).getTitle()\n check(2)\n\n def test_33_TryActivateFlushActivateTicWithSQLDict(self):\n # Test if we call methods only once\n self.TryActivateFlushActivateTic('SQLDict')\n\n def test_34_TryActivateFlushActivateTicWithSQLQueue(self):\n # Test if we call methods only once\n self.TryActivateFlushActivateTic('SQLQueue')\n\n def test_37_TryActivateFlushActivateTicWithMultipleActivities(self):\n # Test if we call methods only once\n self.TryActivateFlushActivateTic('SQLQueue',second='SQLDict')\n self.TryActivateFlushActivateTic('SQLDict',second='SQLQueue')\n\n def test_38_TryCommitSubTransactionWithSQLDict(self):\n # Test if we call methods only once\n self.TryActivateFlushActivateTic('SQLDict',commit_sub=1)\n\n def test_39_TryCommitSubTransactionWithSQLQueue(self):\n # Test if we call methods only once\n self.TryActivateFlushActivateTic('SQLQueue',commit_sub=1)\n\n def test_46_TryActiveProcessWithSQLDict(self):\n # Test if we call methods only once\n self.TryActiveProcess('SQLDict')\n\n def test_47_TryActiveProcessWithSQLQueue(self):\n # Test if we call methods only once\n self.TryActiveProcess('SQLQueue')\n\n def test_48_TryActiveProcessWithSQLJoblib(self):\n # Test if we call methods only once\n self.TryActiveProcessWithResultDict('SQLJoblib')\n\n def test_57_TryCallActivityWithRightUser(self):\n # Test if me execute methods with the right user\n # This should be independant of the activity used\n # We are first logged as seb\n activity_tool = self.portal.portal_activities\n organisation = self.getOrganisation()\n # Add new user toto\n uf = self.portal.acl_users\n uf._doAddUser('toto', '', ['Manager'], [])\n user = uf.getUserById('toto').__of__(uf)\n newSecurityManager(None, user)\n # Execute something as toto\n organisation.activate().newContent(portal_type='Email',id='email')\n # Then execute activities as seb\n 
user = uf.getUserById('seb').__of__(uf)\n newSecurityManager(None, user)\n self.tic()\n email = organisation.get('email')\n # Check if what we did was executed as toto\n self.assertEqual(email.getOwnerInfo()['id'],'toto')\n\n def flushAllActivities(self, silent=0, loop_size=1000):\n \"\"\"Executes all messages until the queue only contains failed\n messages.\n \"\"\"\n activity_tool = self.portal.portal_activities\n for _ in xrange(loop_size):\n activity_tool.distribute(node_count=1)\n activity_tool.tic(processing_node=1)\n\n finished = all(message.processing_node == INVOKE_ERROR_STATE\n for message in activity_tool.getMessageList())\n\n activity_tool.timeShift(3 * VALIDATION_ERROR_DELAY)\n self.commit()\n if finished:\n return\n if not silent:\n self.fail('flushAllActivities maximum loop count reached')\n\n def test_68_TestMessageValidationAndFailedActivities(self):\n \"\"\"after_method_id and failed activities.\n\n Tests that if we have an active method scheduled by\n after_method_id and a failed activity with this method id, the\n method is NOT executed.\n\n Note: earlier version of this test checked exactly the contrary, but it\n was eventually agreed that this was a bug. If an activity fails, all the\n activities that depend on it should be block until the first one is\n resolved.\"\"\"\n activity_tool = self.portal.portal_activities\n original_title = 'something'\n obj = self.portal.organisation_module.newContent(\n portal_type='Organisation',\n title=original_title)\n # Monkey patch Organisation to add a failing method\n def failingMethod(self):\n raise ValueError('This method always fail')\n Organisation.failingMethod = failingMethod\n\n for activity in ActivityTool.activity_dict:\n # reset\n activity_tool.manageClearActivities()\n obj.setTitle(original_title)\n self.commit()\n\n # activate failing message and flush\n for fail_activity in ActivityTool.activity_dict:\n obj.activate(activity = fail_activity).failingMethod()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n full_message_list = activity_tool.getMessageList()\n remaining_messages = [a for a in full_message_list if a.method_id !=\n 'failingMethod']\n if len(full_message_list) != 3:\n self.fail('failingMethod should not have been flushed')\n if len(remaining_messages) != 0:\n self.fail('Activity tool should have no other remaining messages')\n\n # activate our message\n new_title = 'nothing'\n obj.activate(after_method_id = ['failingMethod'],\n activity = activity ).setTitle(new_title)\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n full_message_list = activity_tool.getMessageList()\n remaining_messages = [a for a in full_message_list if a.method_id !=\n 'failingMethod']\n self.assertEqual(len(full_message_list), 4,\n 'failingMethod should not have been flushed')\n self.assertEqual(len(remaining_messages), 1,\n 'Activity tool should have one blocked setTitle activity')\n self.assertEqual(remaining_messages[0].activity_kw['after_method_id'],\n ['failingMethod'])\n self.assertEqual(obj.getTitle(), original_title)\n\n activity_tool.manageClearActivities()\n self.commit()\n\n def test_70_TestCancelFailedActiveObject(self):\n \"\"\"Cancel an active object to make sure that it does not refer to\n a persistent object.\n \"\"\"\n activity_tool = self.portal.portal_activities\n\n original_title = 'something'\n obj = self.portal.organisation_module.newContent(\n portal_type='Organisation',\n title=original_title)\n\n # Monkey patch Organisation to add a failing method\n def 
failingMethod(self):\n raise ValueError('This method always fail')\n Organisation.failingMethod = failingMethod\n\n # First, index the object.\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n self.assertEqual(len(activity_tool.getMessageList()), 0)\n\n # Insert a failing active object.\n obj.activate().failingMethod()\n self.commit()\n self.assertEqual(len(activity_tool.getMessageList()), 1)\n\n # Just wait for the active object to be abandoned.\n self.flushAllActivities(silent=1, loop_size=100)\n self.assertEqual(len(activity_tool.getMessageList()), 1)\n self.assertEqual(activity_tool.getMessageList()[0].processing_node,\n INVOKE_ERROR_STATE)\n\n # Make sure that persistent objects are not present in the connection\n # cache to emulate a restart of Zope. So all volatile attributes will\n # be flushed, and persistent objects will be reloaded.\n activity_tool._p_jar._resetCache()\n\n # Cancel it via the management interface.\n message = activity_tool.getMessageList()[0]\n activity_tool.manageCancel(message.object_path, message.method_id)\n self.commit()\n\n def test_71_RetryMessageExecution(self):\n activity_tool = self.portal.portal_activities\n exec_count = [0]\n # priority does not matter anymore\n priority = random.Random().randint\n def doSomething(self, retry_list):\n i = exec_count[0]\n exec_count[0] = i + 1\n conflict, edit_kw = retry_list[i]\n if edit_kw:\n self.getActivityRuntimeEnvironment().edit(**edit_kw)\n if conflict is not None:\n raise ConflictError if conflict else Exception\n def check(retry_list, **activate_kw):\n fail = retry_list[-1][0] is not None and 1 or 0\n for activity in ActivityTool.activity_dict:\n exec_count[0] = 0\n activity_tool.activate(activity=activity, priority=priority(1,6),\n **activate_kw).doSomething(retry_list)\n self.commit()\n self.flushAllActivities(silent=1)\n self.assertEqual(len(retry_list), exec_count[0])\n self.assertEqual(fail, len(activity_tool.getMessageList()))\n activity_tool.manageCancel(\n activity_tool.getPhysicalPath(), 'doSomething')\n self.commit()\n activity_tool.__class__.doSomething = doSomething\n try:\n ## Default behaviour\n # Usual successful case: activity is run only once\n check([(None, None)])\n # Usual error case: activity is run 6 times before being frozen\n check([(False, None)] * 6)\n # On ConflictError, activity is reexecuted without increasing retry count\n check([(True, None)] * 10 + [(None, None)])\n check([(True, None), (False, None)] * 6)\n ## Customized behaviour\n # Do not retry\n check([(False, {'max_retry': 0})])\n # ... 
even in case of ConflictError\n      check([(True, {'max_retry': 0}),\n             (True, {'max_retry': 0, 'conflict_retry': 0})])\n      check([(True, None)] * 6, conflict_retry=False)\n      # Customized number of retries\n      for n in 3, 9:\n        check([(False, {'max_retry': n})] * n + [(None, None)])\n        check([(False, {'max_retry': n})] * (n + 1))\n      # Infinite retry\n      for n in 3, 9:\n        check([(False, {'max_retry': None})] * n + [(None, None)])\n        check([(False, {'max_retry': None})] * n + [(False, {'max_retry': 0})])\n      check([(False, {'max_retry': None})] * 9 + [(False, None)])\n\n    finally:\n      del activity_tool.__class__.doSomething\n\n  def test_79_ActivateKwForNewContent(self):\n    o1 = self.getOrganisationModule().newContent(\n      activate_kw=dict(tag='The Tag'))\n    self.commit()\n    m, = self.getActivityTool().getMessageList(path=o1.getPath())\n    self.assertEqual(m.activity_kw.get('tag'), 'The Tag')\n    self.tic()\n\n  def test_80_FlushAfterMultipleActivate(self):\n    orga_module = self.getOrganisationModule()\n    p = orga_module.newContent(portal_type='Organisation')\n    self.tic()\n    self.assertEqual(p.getDescription(), \"\")\n    activity_tool = self.portal.portal_activities\n\n    def updateDesc(self):\n      d = self.getDescription()\n      self.setDescription(d + 'a')\n    Organisation.updateDesc = updateDesc\n\n    # First check that dequeue reads the same message only once\n    for i in xrange(10):\n      p.activate(activity=\"SQLDict\").updateDesc()\n      self.commit()\n\n    self.assertEqual(len(activity_tool.getMessageList()), 10)\n    self.tic()\n    self.assertEqual(p.getDescription(), \"a\")\n\n    # Check if there is pending activity after deleting an object\n    for i in xrange(10):\n      p.activate(activity=\"SQLDict\").updateDesc()\n      self.commit()\n\n    self.assertEqual(len(activity_tool.getMessageList()), 10)\n    activity_tool.flush(p, invoke=0)\n    self.commit()\n\n  @for_each_activity\n  def testCallWithGroupIdParameter(self, activity):\n    dedup = activity != 'SQLQueue'\n    activity_tool = self.portal.portal_activities\n    organisation = self.getOrganisation()\n    # Define a group method\n    foobar_list = []\n    def setFoobar(self, object_list):\n      foobar_list.append(len(object_list))\n      for m in object_list:\n        obj = m.object\n        obj.foobar += m.kw.get('number', 1)\n        m.result = None\n    from Products.ERP5Type.Core.Folder import Folder\n    Folder.setFoobar = setFoobar\n\n    Organisation.getFoobar = lambda self: self.foobar\n\n    organisation.foobar = 0\n    self.assertEqual(0, organisation.getFoobar())\n\n    # Test that group_method_id works without group_id\n    for x in xrange(5):\n      organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\").reindexObject(number=1)\n      self.commit()\n\n    message_list = activity_tool.getMessageList()\n    self.assertEqual(len(message_list), 5)\n    activity_tool.tic()\n    expected = 1 if dedup else 5\n    self.assertEqual(expected, organisation.getFoobar())\n\n    # Test that group_method_id works with one group_id defined\n    for x in xrange(5):\n      organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\", group_id=\"1\").reindexObject(number=1)\n      self.commit()\n\n    message_list = activity_tool.getMessageList()\n    self.assertEqual(len(message_list), 5)\n    activity_tool.tic()\n    self.assertEqual(expected * 2, organisation.getFoobar())\n\n    self.assertEqual([expected, expected], foobar_list)\n    del foobar_list[:]\n\n    # Test that group_method_id works with several group_id values defined\n    for x in xrange(5):\n      organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\", group_id=\"1\").reindexObject(number=1)\n      self.commit()\n      
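\n      # Illustrative sketch only (plain Python, not CMFActivity internals):\n      # messages sharing a group_method_id are invoked together in batches,\n      # and a distinct group_id splits those batches, which is exactly what\n      # the foobar_list bookkeeping in this test observes. The helper is\n      # hypothetical.\n      def split_batches(message_list):\n        batches = {}\n        for group_id, number in message_list:\n          batches.setdefault(group_id, []).append(number)\n        return batches\n      # split_batches([('1', 1)] * 5 + [('2', 3)] * 5) yields two batches of\n      # five messages, i.e. two group method calls instead of ten single ones.\n      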
organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\", group_id=\"2\").reindexObject(number=3)\n      self.commit()\n      organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\", group_id=\"1\").reindexObject(number=1)\n      self.commit()\n      organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\", group_id=\"3\").reindexObject(number=5)\n      self.commit()\n\n    message_list = activity_tool.getMessageList()\n    self.assertEqual(len(message_list), 20)\n    activity_tool.tic()\n    self.assertEqual(11 if dedup else 60,\n                     organisation.getFoobar())\n    self.assertEqual([1, 1, 1] if dedup else [5, 5, 10],\n                     sorted(foobar_list))\n\n  def test_84_ActivateKwForWorkflowTransition(self):\n    \"\"\"\n    Test that calling a workflow transition with an activate_kw parameter\n    propagates it to the generated activities\n    \"\"\"\n    o1 = self.getOrganisationModule().newContent()\n    self.tic()\n    o1.validate(activate_kw=dict(tag='The Tag'))\n    self.commit()\n    m, = self.getActivityTool().getMessageList(path=o1.getPath())\n    self.assertEqual(m.activity_kw.get('tag'), 'The Tag')\n    self.tic()\n\n  def test_85_LossOfVolatileAttribute(self):\n    \"\"\"\n    Test that the loss of volatile attributes doesn't lose activities\n    \"\"\"\n    activity_tool = self.getActivityTool()\n    def delete_volatiles():\n      for property_id in activity_tool.__dict__.keys():\n        if property_id.startswith('_v_'):\n          delattr(activity_tool, property_id)\n    organisation_module = self.getOrganisationModule()\n    active_organisation_module = organisation_module.activate()\n    delete_volatiles()\n    # Cause a message to be created.\n    # If the buffer cannot be created, this will raise.\n    active_organisation_module.getTitle()\n    delete_volatiles()\n    # Another activity to check that the first one did not get lost even if\n    # the volatiles disappear.\n    active_organisation_module.getId()\n    self.commit()\n    message_list = activity_tool.getMessageList()\n    self.assertEqual(len(message_list), 2)\n    self.tic()\n\n  def test_88_ProcessingMultipleMessagesMustRevertIndividualMessagesOnError(self):\n    \"\"\"\n    Check that, on queues which support it, processing a batch of multiple\n    messages doesn't cause failed ones to be committed along with successful\n    ones.\n\n    Queues supporting message batch processing:\n    - SQLQueue\n    \"\"\"\n    activity_tool = self.getActivityTool()\n    obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n    active_obj = obj.activate(activity='SQLQueue')\n    def appendToTitle(self, to_append, fail=False):\n      self.setTitle(self.getTitle() + to_append)\n      if fail:\n        raise ValueError('This method always fail')\n    try:\n      Organisation.appendToTitle = appendToTitle\n      obj.setTitle('a')\n      active_obj.appendToTitle('b')\n      active_obj.appendToTitle('c', fail=True)\n      active_obj.appendToTitle('d')\n      object_id = obj.getId()\n      self.commit()\n      self.assertEqual(obj.getTitle(), 'a')\n      self.assertEqual(activity_tool.countMessage(method_id='appendToTitle'), 3)\n      self.flushAllActivities(silent=1, loop_size=100)\n      self.assertEqual(sorted(obj.getTitle()), ['a', 'b', 'd'])\n      message, = self.getMessageList('SQLQueue', method_id='appendToTitle')\n      self.deleteMessageList('SQLQueue', [message])\n    finally:\n      del Organisation.appendToTitle\n\n  def test_89_RequestIsolationInsideSameTic(self):\n    \"\"\"\n    Check that request information does not leak from one activity to\n    another inside the same TIC invocation.\n    This only applies to queues supporting batch processing:\n    - SQLQueue\n    \"\"\"\n    obj = self.portal.organisation_module.newContent(portal_type='Organisation', 
title='Pending')\n marker_id = 'marker_%i' % (random.randint(1, 10), )\n def putMarkerValue(self, marker_id):\n self.REQUEST.set(marker_id, 1)\n def checkMarkerValue(self, marker_id):\n if self.REQUEST.get(marker_id) is not None:\n self.setTitle('Failed')\n else:\n self.setTitle('Success')\n try:\n Organisation.putMarkerValue = putMarkerValue\n Organisation.checkMarkerValue = checkMarkerValue\n obj.activate(activity='SQLQueue', tag='set_first').putMarkerValue(marker_id=marker_id)\n obj.activate(activity='SQLQueue', after_tag='set_first').checkMarkerValue(marker_id=marker_id)\n self.assertEqual(obj.getTitle(), 'Pending')\n self.tic()\n self.assertEqual(obj.getTitle(), 'Success')\n finally:\n del Organisation.putMarkerValue\n del Organisation.checkMarkerValue\n\n def test_globalrequest(self):\n \"\"\"zope.globalrequest.getRequest (also known as Products.Global.get_request)\n should be same as app.REQUEST, also when executing activities.\n \"\"\"\n from zope.globalrequest import getRequest\n get_request_before = getRequest()\n def checkRequest(active_self):\n self.assertIs(getRequest(), active_self.REQUEST)\n\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n Organisation.checkRequest = checkRequest\n try:\n obj.activate(activity='SQLQueue').checkRequest()\n obj.activate(activity='SQLDict').checkRequest()\n self.tic()\n finally:\n del Organisation.checkRequest\n self.assertIs(getRequest(), get_request_before)\n\n @for_each_activity\n def testTryUserNotificationOnActivityFailure(self, activity):\n message_list = self.portal.MailHost._message_list\n del message_list[:]\n portal_activities = self.portal.portal_activities\n countMessage = portal_activities.countMessage\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n def failingMethod(self): raise ValueError('This method always fails')\n Organisation.failingMethod = failingMethod\n try:\n portal_activities.activity_failure_mail_notification = True\n # MESSAGE_NOT_EXECUTED\n obj.activate(activity=activity).failingMethod()\n self.commit()\n self.assertFalse(message_list)\n self.flushAllActivities(silent=1, loop_size=100)\n # Check there is a traceback in the email notification\n sender, recipients, mail = message_list.pop()\n self.assertIn(\"Module %s, line %s, in failingMethod\" % (\n __name__, inspect.getsourcelines(failingMethod)[1]), mail)\n self.assertIn(\"ValueError:\", mail)\n portal_activities.manageClearActivities()\n # MESSAGE_NOT_EXECUTABLE\n obj_path = obj.getPath()\n obj.activate(activity=activity).failingMethod()\n self.commit()\n obj.getParentValue()._delObject(obj.getId())\n self.commit()\n self.assertGreater(countMessage(path=obj_path), 0)\n self.tic()\n self.assertEqual(countMessage(path=obj_path), 0)\n self.assertFalse(message_list)\n finally:\n self.portal.portal_activities.activity_failure_mail_notification = True\n del Organisation.failingMethod\n\n @for_each_activity\n def testTryUserNotificationDisabledOnActivityFailure(self, activity):\n message_list = self.portal.MailHost._message_list\n del message_list[:]\n portal_activities = self.portal.portal_activities\n countMessage = portal_activities.countMessage\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n def failingMethod(self): raise ValueError('This method always fails')\n Organisation.failingMethod = failingMethod\n try:\n portal_activities.activity_failure_mail_notification = False\n # MESSAGE_NOT_EXECUTED\n 
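\n      # Illustrative sketch only (plain Python, not CMFActivity internals)\n      # of the failure handling exercised here and in the previous test: a\n      # failing message is retried until its retry budget is exhausted, then\n      # parked in an error state, and a notification is emitted only when\n      # mail notification is enabled. All names are hypothetical.\n      def run_until_frozen(invoke, max_retry=5, notify=None):\n        retry = 0\n        while True:\n          try:\n            return invoke()\n          except Exception:\n            if retry >= max_retry:\n              if notify is not None:\n                notify()  # the email checked (or not) by these tests\n              return 'frozen'  # akin to processing_node = INVOKE_ERROR_STATE\n            retry += 1\n      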
obj.activate(activity=activity).failingMethod()\n self.commit()\n self.assertFalse(message_list)\n self.flushAllActivities(silent=1, loop_size=100)\n # Check there is a traceback in the email notification\n self.assertFalse(message_list)\n portal_activities.manageClearActivities()\n # MESSAGE_NOT_EXECUTABLE\n obj_path = obj.getPath()\n obj.activate(activity=activity).failingMethod()\n self.commit()\n obj.getParentValue()._delObject(obj.getId())\n self.commit()\n self.assertGreater(countMessage(path=obj_path), 0)\n self.tic()\n self.assertEqual(countMessage(path=obj_path), 0)\n self.assertFalse(message_list)\n finally:\n portal_activities.activity_failure_mail_notification = True\n del Organisation.failingMethod\n\n def test_93_tryUserNotificationRaise(self):\n activity_tool = self.portal.portal_activities\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n original_notifyUser = Message.notifyUser\n def failingMethod(self, *args, **kw):\n raise ValueError('This method always fail')\n Message.notifyUser = failingMethod\n Organisation.failingMethod = failingMethod\n try:\n for activity in ActivityTool.activity_dict:\n obj.activate(activity=activity, priority=6).failingMethod()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n message, = activity_tool.getMessageList(\n activity=activity, method_id='failingMethod')\n self.assertEqual(message.processing_node, INVOKE_ERROR_STATE)\n self.assertTrue(message.retry)\n activity_tool.manageDelete(message.uid, activity)\n self.commit()\n finally:\n Message.notifyUser = original_notifyUser\n del Organisation.failingMethod\n\n @for_each_activity\n def testTryActivityRaiseInCommitDoesNotStallActivityConection(self, activity):\n \"\"\"\n Check that an activity which commit raises (as would a regular conflict\n error be raised in tpc_vote) does not cause activity connection to\n stall.\n \"\"\"\n try:\n Organisation.registerFailingTransactionManager = registerFailingTransactionManager\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n now = DateTime()\n obj.activate(activity=activity).registerFailingTransactionManager()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n self.commit()\n # Check that cmf_activity SQL connection still works\n connection_da = self.portal.cmf_activity_sql_connection()\n self.assertFalse(connection_da._registered)\n connection_da.query('select 1')\n self.assertTrue(connection_da._registered)\n self.commit()\n self.assertFalse(connection_da._registered)\n message, = self.getMessageList(activity)\n self.deleteMessageList(activity, [message])\n finally:\n del Organisation.registerFailingTransactionManager\n\n @for_each_activity\n def testTryActivityRaiseInCommitDoesNotLoseMessages(self, activity):\n \"\"\"\n \"\"\"\n try:\n Organisation.registerFailingTransactionManager = registerFailingTransactionManager\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n now = DateTime()\n obj.activate(activity=activity).registerFailingTransactionManager()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n self.commit()\n message, = self.getMessageList(activity,\n method_id='registerFailingTransactionManager')\n self.deleteMessageList(activity, [message])\n finally:\n del Organisation.registerFailingTransactionManager\n\n @for_each_activity\n def testTryChangeSkinInActivity(self, activity):\n activity_tool = self.getActivityTool()\n def changeSkinToNone(self):\n 
self.getPortalObject().changeSkin(None)\n Organisation.changeSkinToNone = changeSkinToNone\n try:\n organisation = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n organisation.activate(activity=activity).changeSkinToNone()\n self.commit()\n self.assertEqual(len(activity_tool.getMessageList()), 1)\n self.flushAllActivities(silent=1, loop_size=100)\n finally:\n del Organisation.changeSkinToNone\n\n @for_each_activity\n def testDeduplicatingQueuesDoNotDeleteSimilaritiesBeforeExecution(self,\n activity):\n \"\"\"\n Test that SQLDict does not delete similar messages which have the same\n method_id and path but a different tag before execution.\n \"\"\"\n if activity == 'SQLQueue':\n return\n activity_tool = self.getActivityTool()\n marker = []\n def doSomething(self, other_tag):\n marker.append(self.countMessage(tag=other_tag))\n activity_tool.__class__.doSomething = doSomething\n try:\n # Adds two similar but not the same activities.\n activity_tool.activate(activity=activity, after_tag='foo',\n tag='a').doSomething(other_tag='b')\n activity_tool.activate(activity=activity, after_tag='bar',\n tag='b').doSomething(other_tag='a')\n self.commit()\n activity_tool.tic() # make sure distribution phase was not skipped\n activity_tool.distribute()\n # after distribute, similarities are still there.\n self.assertEqual(len(self.getMessageList(activity)), 2)\n activity_tool.tic()\n self.assertEqual(marker, [1])\n finally:\n del activity_tool.__class__.doSomething\n\n @for_each_activity\n def testDeduplicatingQueuesDoNotDeleteDuplicatesBeforeExecution(self,\n activity):\n \"\"\"\n Test that SQLDict does not delete messages before execution\n even if messages have the same method_id and path and tag.\n There could be other things which differ (ex: serialization_tag) and may\n not all be cheap to check during validation. 
The validation node is the only\n    non-parallelisable Zope-side task around activities, so it should be kept\n    simple.\n    Deduplication is cheap:\n    - inside the transaction which spawned duplicate activities, because it\n      has to have created activities around anyway, and can keep track\n    - inside the CMFActivity-level processing surrounding activity execution\n      because it has to load the activities to process them anyway\n    \"\"\"\n    if activity == 'SQLQueue':\n      return\n    activity_tool = self.getActivityTool()\n    # Add two identical activities.\n    activity_tool.activate(activity=activity, after_tag='foo', priority=2,\n                           tag='a').getId()\n    self.commit()\n    uid1, = [x.uid for x in self.getMessageList(activity)]\n    activity_tool.activate(activity=activity, after_tag='bar', priority=1,\n                           tag='a').getId()\n    self.commit()\n    uid2, = [x.uid for x in self.getMessageList(activity) if x.uid != uid1]\n    self.assertEqual(len(activity_tool.getMessageList()), 2)\n    activity_tool.distribute()\n    # After distribute, the duplicate is still present.\n    self.assertItemsEqual([uid1, uid2],\n                          [x.uid for x in self.getMessageList(activity)])\n    activity_tool.tic()\n\n  def testCheckSQLDictDistributeWithSerializationTagAndGroupMethodId(self):\n    \"\"\"\n    Distribution was at some point buggy with this scenario, where the same\n    serialization_tag was activated once with a group_method_id and once\n    without:\n      foo.activate(serialization_tag='a', group_method_id='x').getTitle()\n      foo.activate(serialization_tag='a').getId()\n    \"\"\"\n    def getMessageList():\n      return [\n        (x.activity_kw['serialization_tag'], x.processing_node)\n        for x in activity_tool.getMessageList()\n      ]\n    def activate(serialization_tag='a'):\n      organisation.activate(\n        serialization_tag=serialization_tag,\n        group_method_id='portal_catalog/catalogObjectList',\n      ).getTitle()\n    organisation = self.portal.organisation_module.newContent(portal_type='Organisation')\n    self.tic()\n    activity_tool = self.getActivityTool()\n    activate('a')\n    self.commit()\n    activate('a')\n    self.commit()\n    # Both activities are queued\n    self.assertItemsEqual(\n      getMessageList(),\n      [\n        ('a', -1),\n        ('a', -1),\n      ],\n    )\n    activity_tool.distribute()\n    # Both activities are validated at the same time.\n    # Note: this specific test implementation relies on the absence of\n    # validation-time deduplication, which is not strictly related to\n    # serialization_tag behaviour.\n    self.assertItemsEqual(\n      getMessageList(),\n      [\n        ('a', 0),\n        ('a', 0),\n      ],\n    )\n    activate('a')\n    self.commit()\n    activate('b')\n    self.commit()\n    # 3rd & 4th activities queued\n    self.assertItemsEqual(\n      getMessageList(),\n      [\n        ('a', 0),\n        ('a', 0),\n        ('a', -1),\n        ('b', -1),\n      ],\n    )\n    activity_tool.distribute()\n    # 3rd activity does not get validated, 4th is validated\n    self.assertItemsEqual(\n      getMessageList(),\n      [\n        ('a', 0),\n        ('a', 0),\n        ('a', -1),\n        ('b', 0),\n      ],\n    )\n    # 1st, 2nd and 4th are executed, then the 3rd gets validated and\n    # executed, and the queue ends empty.\n    self.tic()\n\n  def test_104_interQueuePriorities(self):\n    \"\"\"\n    Important note: there is no way to really reliably check that this\n    feature is correctly implemented, as activity execution order is\n    non-deterministic.\n    The best that can be done is to check that under certain circumstances\n    the activity execution order matches expectations.\n    \"\"\"\n    organisation = self.portal.organisation_module.newContent(portal_type='Organisation')\n    self.tic()\n    activity_tool = self.getActivityTool()\n    check_result_dict = {}\n    def runAndCheck():\n      
check_result_dict.clear()\n self.commit()\n self.assertEqual(len(check_result_dict), 0)\n self.tic()\n self.assertEqual(len(check_result_dict), 2)\n self.assertTrue(check_result_dict['before_ran'])\n self.assertTrue(check_result_dict['after_ran'])\n def mustRunBefore(self):\n check_result_dict['before_ran'] = 'after_ran' not in check_result_dict\n def mustRunAfter(self):\n check_result_dict['after_ran'] = 'before_ran' in check_result_dict\n Organisation.mustRunBefore = mustRunBefore\n Organisation.mustRunAfter = mustRunAfter\n try:\n # Check that ordering looks good (SQLQueue first)\n organisation.activate(activity='SQLQueue', priority=1).mustRunBefore()\n organisation.activate(activity='SQLDict', priority=2).mustRunAfter()\n runAndCheck()\n # Check that ordering looks good (SQLDict first)\n organisation.activate(activity='SQLDict', priority=1).mustRunBefore()\n organisation.activate(activity='SQLQueue', priority=2).mustRunAfter()\n runAndCheck()\n # Check that tag takes precedence over priority (SQLQueue first by priority)\n organisation.activate(activity='SQLQueue', priority=1, after_tag='a').mustRunAfter()\n organisation.activate(activity='SQLDict', priority=2, tag='a').mustRunBefore()\n runAndCheck()\n # Check that tag takes precedence over priority (SQLDict first by priority)\n organisation.activate(activity='SQLDict', priority=1, after_tag='a').mustRunAfter()\n organisation.activate(activity='SQLQueue', priority=2, tag='a').mustRunBefore()\n runAndCheck()\n finally:\n del Organisation.mustRunBefore\n del Organisation.mustRunAfter\n\n @for_each_activity\n def testCheckActivityRuntimeEnvironment(self, activity):\n document = self.portal.organisation_module\n activity_result = []\n def extractActivityRuntimeEnvironment(self):\n activity_result.append(self.getActivityRuntimeEnvironment())\n document.__class__.doSomething = extractActivityRuntimeEnvironment\n try:\n document.activate(activity=activity).doSomething()\n self.commit()\n # Check that getActivityRuntimeEnvironment raises outside of activities\n self.assertRaises(KeyError, document.getActivityRuntimeEnvironment)\n # Check Runtime isolation\n self.tic()\n # Check that it still raises outside of activities\n self.assertRaises(KeyError, document.getActivityRuntimeEnvironment)\n # Check activity runtime environment instance\n env = activity_result.pop()\n self.assertFalse(activity_result)\n message = env._message\n self.assertEqual(message.line.priority, 1)\n self.assertEqual(message.object_path, document.getPhysicalPath())\n self.assertTrue(message.conflict_retry) # default value\n env.edit(max_retry=0, conflict_retry=False)\n self.assertFalse(message.conflict_retry) # edited value\n self.assertRaises(AttributeError, env.edit, foo='bar')\n finally:\n del document.__class__.doSomething\n\n @for_each_activity\n def testSerializationTag(self, activity):\n organisation = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n activity_tool = self.getActivityTool()\n # First scenario: activate, distribute, activate, distribute\n # Create first activity and distribute: it must be distributed\n organisation.activate(activity=activity, serialization_tag='1').getTitle()\n self.commit()\n result = activity_tool.getMessageList()\n self.assertEqual(len(result), 1)\n activity_tool.distribute()\n result = activity_tool.getMessageList()\n self.assertEqual(len([x for x in result if x.processing_node == 0]), 1)\n # Create second activity and distribute: it must *NOT* be distributed\n 
organisation.activate(activity=activity, serialization_tag='1').getTitle()\n self.commit()\n result = activity_tool.getMessageList()\n self.assertEqual(len(result), 2)\n activity_tool.distribute()\n result = activity_tool.getMessageList()\n self.assertEqual(len([x for x in result if x.processing_node == 0]), 1) # Distributed message list len is still 1\n self.tic()\n # Second scenario: activate, activate, distribute\n # Both messages must be distributed (this is different from regular tags)\n organisation.activate(activity=activity, serialization_tag='1', priority=2).getTitle()\n # Use a different method just so that SQLDict doesn't merge both activities prior to insertion.\n organisation.activate(activity=activity, serialization_tag='1', priority=1).getId()\n self.commit()\n result = activity_tool.getMessageList()\n self.assertEqual(len(result), 2)\n activity_tool.distribute()\n result = activity_tool.getMessageList()\n # at most 1 activity for a given serialization tag can be validated\n message, = [x for x in result if x.processing_node == 0]\n self.assertEqual(message.method_id, 'getId')\n # the other one is still waiting for validation\n message, = [x for x in result if x.processing_node == -1]\n self.assertEqual(message.method_id, 'getTitle')\n self.tic()\n # Check that giving a None value to serialization_tag does not confuse\n # CMFActivity\n organisation.activate(activity=activity, serialization_tag=None).getTitle()\n self.tic()\n\n def test_110_testAbsoluteUrl(self):\n # Tests that absolute_url works in activities. The URL generation is based\n # on REQUEST information when the method was activated.\n request = self.portal.REQUEST\n\n request.setServerURL('http', 'test.erp5.org', '9080')\n request.other['PARENTS'] = [self.portal.organisation_module]\n request.setVirtualRoot('virtual_root')\n\n calls = []\n def checkAbsoluteUrl(self):\n calls.append(self.absolute_url())\n Organisation.checkAbsoluteUrl = checkAbsoluteUrl\n\n try:\n o = self.portal.organisation_module.newContent(\n portal_type='Organisation', id='test_obj')\n self.assertEqual(o.absolute_url(),\n 'http://test.erp5.org:9080/virtual_root/test_obj')\n o.activate().checkAbsoluteUrl()\n\n # Reset server URL and virtual root before executing messages.\n # This simulates the case of activities beeing executed with different\n # REQUEST, such as TimerServer.\n # BBB Zope2: port argument below needs to be str in Zope2, but if we provide '443',\n # Zope4 will return absolute_url() with ':443' and Zope2 will return without '443'.\n # This is why we use '444' here.\n request.setServerURL('https', 'anotherhost.erp5.org', '444')\n request.other['PARENTS'] = [self.app]\n request.setVirtualRoot('')\n # obviously, the object url is different\n self.assertEqual(o.absolute_url(),\n 'https://anotherhost.erp5.org:444/%s/organisation_module/test_obj'\n % self.portal.getId())\n\n # but activities are executed using the previous request information\n self.flushAllActivities(loop_size=1000)\n self.assertEqual(calls, ['http://test.erp5.org:9080/virtual_root/test_obj'])\n finally:\n del Organisation.checkAbsoluteUrl\n\n def CheckLocalizerWorks(self, activity):\n FROM_STRING = 'Foo'\n TO_STRING = 'Bar'\n LANGUAGE = 'xx'\n def translationTest(context):\n from Products.ERP5Type.Message import Message\n context.setTitle(context.Base_translateString(FROM_STRING))\n context.setDescription(str(Message('erp5_ui', FROM_STRING)))\n portal = self.portal\n portal.Localizer.erp5_ui.manage_addLanguage(LANGUAGE)\n # Add FROM_STRING to the message catalog\n 
portal.Localizer.erp5_ui.gettext(FROM_STRING)\n # ...and translate it.\n portal.Localizer.erp5_ui.message_edit(message=FROM_STRING,\n language=LANGUAGE, translation=TO_STRING, note='')\n organisation = portal.organisation_module.newContent(\n portal_type='Organisation')\n self.tic()\n Organisation.translationTest = translationTest\n try:\n REQUEST = organisation.REQUEST\n # Simulate what a browser would have sent to Zope\n REQUEST.environ['HTTP_ACCEPT_LANGUAGE'] = LANGUAGE\n organisation.activate(activity=activity).translationTest()\n self.commit()\n # Remove request parameter to check that it was saved at activate call\n # and restored at message execution.\n del REQUEST.environ['HTTP_ACCEPT_LANGUAGE']\n self.tic()\n finally:\n del Organisation.translationTest\n self.assertEqual(TO_STRING, organisation.getTitle())\n self.assertEqual(TO_STRING, organisation.getDescription())\n\n def test_112_checkLocalizerWorksSQLQueue(self):\n self.CheckLocalizerWorks('SQLQueue')\n\n def test_113_checkLocalizerWorksSQLDict(self):\n self.CheckLocalizerWorks('SQLDict')\n\n def test_114_checkSQLQueueActivitySucceedsAfterActivityChangingSkin(self):\n portal = self.portal\n activity_tool = self.getActivityTool()\n # Check that a reference script can be reached\n script_id = 'ERP5Site_reindexAll'\n self.assertIsNot(getattr(portal, script_id), None)\n # Create a new skin selection\n skin_selection_name = 'test_114'\n portal.portal_skins.manage_skinLayers(add_skin=1, skinpath=[''], skinname=skin_selection_name)\n # Create a dummy document\n organisation = portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n # Set custom methods to call as activities.\n def first(context):\n context.changeSkin(skin_selection_name)\n if getattr(context, script_id, None) is not None:\n raise Exception('%s is not supposed to be found here.' 
% script_id)\n def second(context):\n # If the wrong skin is selected this will raise.\n getattr(context, script_id)\n Organisation.firstTest = first\n Organisation.secondTest = second\n try:\n organisation.activate(tag='foo', activity='SQLQueue').firstTest()\n organisation.activate(after_tag='foo', activity='SQLQueue').secondTest()\n self.commit()\n gc.disable()\n self.tic()\n gc.enable()\n # Forcibly restore skin selection, otherwise getMessageList would only\n # emit a log when retrieving the ZSQLMethod.\n portal.changeSkin(None)\n finally:\n del Organisation.firstTest\n del Organisation.secondTest\n\n def test_115_checkProcessShutdown(self):\n # Thread execution plan for this test:\n # main ActivityThread ProcessShutdownThread\n # start ActivityThread None None\n # wait for rendez_vous_lock (run) None\n # wait for rendez_vous_lock release rendez_vous_lock None\n # start ProcessShutdownThread wait for activity_lock None\n # release activity_lock wait for activity_lock internal wait\n # wait for activity_thread (finish) internal wait\n # wait for process_shutdown_thread None (finish)\n #\n # This test only checks that:\n # - activity tool can exit between 2 processable activity batches\n # - activity tool won't process activities after process_shutdown was called\n # - process_shutdown returns before Activity.tic()\n # This is not perfect though, since it would require to have access to\n # the waiting queue of CMFActivity's internal lock (is_running_lock) to\n # make sure that it's what is preventing process_shutdown from returning.\n activity_tool = self.getActivityTool()\n organisation = self.portal.organisation_module.newContent(\n portal_type='Organisation')\n self.tic()\n activity_event = threading.Event()\n rendez_vous_event = threading.Event()\n def waitingActivity(context):\n # Inform test that we arrived at rendez-vous.\n rendez_vous_event.set()\n # When this event is available, it means test has called process_shutdown.\n assert activity_event.wait(10)\n original_dequeue = SQLDict.dequeueMessage\n queue_tic_test_dict = {}\n def dequeueMessage(self, activity_tool, processing_node, node_family_id_set):\n # This is a one-shot method, revert after execution\n SQLDict.dequeueMessage = original_dequeue\n result = self.dequeueMessage(activity_tool, processing_node, node_family_id_set)\n queue_tic_test_dict['isAlive'] = process_shutdown_thread.isAlive()\n return result\n SQLDict.dequeueMessage = dequeueMessage\n Organisation.waitingActivity = waitingActivity\n try:\n # Use SQLDict with no group method so that both activities won't be\n # executed in the same batch, letting activity tool a chance to check\n # if execution should stop processing activities.\n organisation.activate(activity='SQLDict', tag='foo').waitingActivity()\n organisation.activate(activity='SQLDict', after_tag='foo').getTitle()\n self.commit()\n self.assertEqual(len(activity_tool.getMessageList()), 2)\n activity_tool.distribute()\n self.commit()\n\n # Start a tic in another thread, so they can meet at rendez-vous.\n class ActivityThread(threading.Thread):\n def run(self):\n # Call changeskin, since skin selection depend on thread id, and we\n # are in a new thread.\n activity_tool.changeSkin(None)\n activity_tool.tic()\n activity_thread = ActivityThread()\n # Do not try to outlive main thread.\n activity_thread.setDaemon(True)\n # Call process_shutdown in yet another thread because it will wait for\n # running activity to complete before returning, and we need to unlock\n # activity *after* calling process_shutdown 
to make sure the next\n      # activity won't be executed.\n      class ProcessShutdownThread(threading.Thread):\n        def run(self):\n          activity_tool.process_shutdown(3, 0)\n      process_shutdown_thread = ProcessShutdownThread()\n      # Do not try to outlive main thread.\n      process_shutdown_thread.setDaemon(True)\n\n      activity_thread.start()\n      # Wait at rendez-vous for activity to arrive.\n      assert rendez_vous_event.wait(10)\n      # Initiate shutdown\n      process_shutdown_thread.start()\n      try:\n        # Let waiting activity finish and wait for thread exit\n        activity_event.set()\n        activity_thread.join(10)\n        assert not activity_thread.is_alive()\n        process_shutdown_thread.join(10)\n        assert not process_shutdown_thread.is_alive()\n        # Check that there is still one activity pending\n        message_list = activity_tool.getMessageList()\n        self.assertEqual(len(message_list), 1)\n        self.assertEqual(message_list[0].method_id, 'getTitle')\n        # Check that process_shutdown_thread was still running when\n        # Queue_tic returned.\n        self.assertTrue(queue_tic_test_dict.get('isAlive'), repr(queue_tic_test_dict))\n        # Call tic in foreground. This must not lead to activity execution.\n        activity_tool.tic()\n        self.assertEqual(len(activity_tool.getMessageList()), 1)\n      finally:\n        # Put activity tool back in a working state\n        try:\n          cancelProcessShutdown()\n        except Exception:\n          # If something failed in process_shutdown, the shutdown lock might\n          # not have been taken in CMFActivity, leading to a new exception\n          # here hiding the test error.\n          pass\n    finally:\n      del Organisation.waitingActivity\n      SQLDict.dequeueMessage = original_dequeue\n    self.tic()\n\n  def test_hasActivity(self):\n    active_object = self.portal.organisation_module.newContent(\n      portal_type='Organisation')\n    active_process = self.portal.portal_activities.newActiveProcess()\n    self.tic()\n\n    self.assertFalse(active_object.hasActivity())\n    self.assertFalse(active_process.hasActivity())\n\n    def test(obj, **kw):\n      for activity in ActivityTool.activity_dict:\n        active_object.activate(activity=activity, **kw).getTitle()\n        self.commit()\n        self.assertTrue(obj.hasActivity(), activity)\n        self.tic()\n        self.assertFalse(obj.hasActivity(), activity)\n\n    test(active_object)\n    test(active_process, active_process=active_process)\n    test(active_process, active_process=active_process.getPath())\n\n  @for_each_activity\n  def test_hasErrorActivity_error(self, activity):\n    # Monkey patch Organisation to add a failing method\n    def failingMethod(self):\n      raise ValueError('This method always fail')\n    Organisation.failingMethod = failingMethod\n    active_object = self.portal.organisation_module.newContent(\n      portal_type='Organisation')\n    active_process = self.portal.portal_activities.newActiveProcess()\n    self.tic()\n\n    self.assertFalse(active_object.hasErrorActivity())\n    self.assertFalse(active_process.hasErrorActivity())\n\n    active_object.activate(\n      activity=activity, active_process=active_process).failingMethod()\n    self.commit()\n    # assert that an activity was created\n    self.assertTrue(active_object.hasActivity())\n    self.assertTrue(active_process.hasActivity())\n    # assert that no error is reported yet\n    self.assertFalse(active_object.hasErrorActivity())\n    self.assertFalse(active_process.hasErrorActivity())\n    self.flushAllActivities()\n    # assert that the activity is still there\n    self.assertTrue(active_object.hasActivity())\n    self.assertTrue(active_process.hasActivity())\n    # assert that an error has been seen\n    self.assertTrue(active_object.hasErrorActivity())\n    self.assertTrue(active_process.hasErrorActivity())\n    message, = 
self.getMessageList(activity)\n self.deleteMessageList(activity, [message])\n\n @for_each_activity\n def test_hasErrorActivity(self, activity):\n active_object = self.portal.organisation_module.newContent(\n portal_type='Organisation')\n active_process = self.portal.portal_activities.newActiveProcess()\n self.tic()\n\n self.assertFalse(active_object.hasErrorActivity())\n self.assertFalse(active_process.hasErrorActivity())\n\n active_object.activate(\n activity=activity, active_process=active_process).getTitle()\n self.commit()\n # assert that any activity is created\n self.assertTrue(active_object.hasActivity())\n self.assertTrue(active_process.hasActivity())\n # assert that no error is reported\n self.assertFalse(active_object.hasErrorActivity())\n self.assertFalse(active_process.hasErrorActivity())\n self.flushAllActivities()\n # assert that any activity is created\n self.assertFalse(active_object.hasActivity())\n self.assertFalse(active_process.hasActivity())\n # assert that no error is reported\n self.assertFalse(active_object.hasErrorActivity())\n self.assertFalse(active_process.hasErrorActivity())\n\n def test_active_object_hasActivity_does_not_catch_exceptions(self):\n \"\"\"\n Some time ago, hasActivity was doing a silent try/except, and this was\n a possible disaster for some projects. Here we make sure that if the\n SQL request fails, then the exception is not ignored\n \"\"\"\n active_object = self.portal.organisation_module.newContent(\n portal_type='Organisation')\n self.tic()\n self.assertFalse(active_object.hasActivity())\n\n # Monkey patch to induce any error artificially in the sql connection.\n def query(self, query_string,*args, **kw):\n raise ValueError\n\n from Products.ZMySQLDA.db import DB\n DB.original_query = DB.query\n try:\n active_object.activate().getTitle()\n self.commit()\n self.assertTrue(active_object.hasActivity())\n # Make the sql request not working\n DB.original_query = DB.query\n DB.query = query\n # Make sure then that hasActivity fails\n self.assertRaises(ValueError, active_object.hasActivity)\n finally:\n DB.query = DB.original_query\n del DB.original_query\n self.tic()\n\n def test_insert_max_payload(self):\n activity_tool = self.portal.portal_activities\n # XXX: For unknown reasons, this test runs faster after the tables are\n # recreated. 
We could also make this test run before all others.\n activity_tool.manageClearActivities()\n self.commit()\n max_allowed_packet = activity_tool.getSQLConnection().getMaxAllowedPacket()\n insert_list = []\n invoke_list = []\n N = 100\n class Skip(Exception):\n \"\"\"\n Speed up test by not interrupting the first transaction\n as soon as we have the information we want.\n \"\"\"\n original_query = six.get_unbound_function(DB.query)\n def query(self, query_string, *args, **kw):\n if query_string.startswith('INSERT'):\n insert_list.append(len(query_string))\n if not n:\n raise Skip\n return original_query(self, query_string, *args, **kw)\n def check():\n for i in xrange(1, N):\n activity_tool.activate(activity=activity, group_id=str(i)\n ).doSomething(arg)\n activity_tool.activate(activity=activity, group_id='~'\n ).doSomething(' ' * n)\n self.tic()\n self.assertEqual(len(invoke_list), N)\n invoke_list.remove(n)\n self.assertEqual(set(invoke_list), {len(arg)})\n del invoke_list[:]\n activity_tool.__class__.doSomething = \\\n lambda self, arg: invoke_list.append(len(arg))\n try:\n DB.query = query\n for activity in ActivityTool.activity_dict:\n arg = ' ' * (max_allowed_packet // N)\n # Find the size of the last message argument, such that all messages\n # are inserted in a single query whose size is to the maximum allowed.\n n = 0\n self.assertRaises(Skip, check)\n self.abort()\n n = max_allowed_packet - insert_list.pop()\n self.assertFalse(insert_list)\n # Now check with the biggest insert query possible.\n check()\n self.assertEqual(max_allowed_packet, insert_list.pop())\n self.assertFalse(insert_list)\n # And check that the insert query is split\n # in order not to exceed max_allowed_packet.\n n += 1\n check()\n self.assertEqual(len(insert_list), 2)\n del insert_list[:]\n finally:\n del activity_tool.__class__.doSomething\n DB.query = original_query\n\n def test_115_TestSerializationTagSQLDictPreventsParallelExecution(self):\n \"\"\"\n Test if there are multiple activities with the same serialization tag,\n then serialization tag guarantees that only one of the same serialization\n tagged activities can be processed at the same time.\n \"\"\"\n portal = self.portal\n activity_tool = portal.portal_activities\n\n # Add 6 activities\n portal.organisation_module.activate(activity='SQLDict', tag='', serialization_tag='test_115').getId()\n self.commit()\n portal.organisation_module.activate(activity='SQLDict', serialization_tag='test_115').getTitle()\n self.commit()\n portal.organisation_module.activate(activity='SQLDict', tag='tag_1', serialization_tag='test_115').getId()\n self.commit()\n portal.person_module.activate(activity='SQLDict', serialization_tag='test_115').getId()\n self.commit()\n portal.person_module.activate(activity='SQLDict', tag='tag_2').getId()\n self.commit()\n portal.organisation_module.activate(activity='SQLDict', tag='', serialization_tag='test_115').getId()\n self.commit()\n\n # distribute and assign them to 3 nodes\n activity_tool.distribute()\n self.commit()\n\n activity = ActivityTool.activity_dict['SQLDict']\n activity.getProcessableMessageList(activity_tool, 1, ())\n self.commit()\n activity.getProcessableMessageList(activity_tool, 2, ())\n self.commit()\n activity.getProcessableMessageList(activity_tool, 3, ())\n self.commit()\n\n result = activity._getMessageList(activity_tool.getSQLConnection())\n try:\n self.assertEqual(len([message\n for message in result\n if (message.processing_node>0 and\n message.serialization_tag=='test_115')]),\n 1)\n\n 
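\n      # Illustrative sketch only (plain Python, not the real validation\n      # code) of the rule asserted around this point: at most one message\n      # per serialization_tag may be assigned to a processing node at a\n      # time, and the others stay unassigned (processing_node == -1). All\n      # names are hypothetical.\n      def assign_nodes(message_list):\n        assigned_tag_set = set()\n        assignment = {}\n        for uid, serialization_tag in message_list:\n          if serialization_tag and serialization_tag in assigned_tag_set:\n            assignment[uid] = -1  # left for a later validation round\n          else:\n            assigned_tag_set.add(serialization_tag)\n            assignment[uid] = 1  # handed to a processing node\n        return assignment\n      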
self.assertEqual(len([message\n for message in result\n if (message.processing_node==-1 and\n message.serialization_tag=='test_115')]),\n 4)\n\n self.assertEqual(len([message\n for message in result\n if (message.processing_node>0 and\n message.serialization_tag=='')]),\n 1)\n finally:\n # Clear activities from all nodes\n self.deleteMessageList('SQLDict', result)\n\n def test_116_RaiseInCommitBeforeMessageExecution(self):\n \"\"\"\n Test behaviour of CMFActivity when the commit just before message\n execution fails. In particular, it should restart the messages it\n selected (processing_node=current_node) instead of ignoring them forever.\n \"\"\"\n processed = []\n activity_tool = self.portal.portal_activities\n activity_tool.__class__.doSomething = processed.append\n try:\n for activity in ActivityTool.activity_dict:\n activity_tool.activate(activity=activity).doSomething(activity)\n self.commit()\n # Make first commit in dequeueMessage raise\n registerFailingTransactionManager()\n self.assertRaises(CommitFailed, activity_tool.tic)\n # Normally, the request stops here and Zope aborts the transaction\n self.abort()\n self.assertEqual(processed, [])\n # Activity is already reserved for current node. Check tic reselects it.\n activity_tool.tic()\n self.assertEqual(processed, [activity])\n del processed[:]\n finally:\n del activity_tool.__class__.doSomething\n\n def test_117_PlacelessDefaultReindexParameters(self):\n \"\"\"\n Test behaviour of PlacelessDefaultReindexParameters.\n \"\"\"\n portal = self.portal\n\n # Make a new Person object to make sure that the portal type\n # is migrated to an instance of a portal type class, otherwise\n # the portal type may generate an extra active object.\n portal.person_module.newContent(portal_type='Person')\n self.tic()\n\n original_reindex_parameters = portal.getPlacelessDefaultReindexParameters()\n if original_reindex_parameters is None:\n original_reindex_parameters = {}\n\n tag = 'SOME_RANDOM_TAG'\n activate_kw = original_reindex_parameters.get('activate_kw', {}).copy()\n activate_kw['tag'] = tag\n portal.setPlacelessDefaultReindexParameters(activate_kw=activate_kw, \\\n **original_reindex_parameters)\n current_default_reindex_parameters = portal.getPlacelessDefaultReindexParameters()\n self.assertEqual({'activate_kw': {'tag': tag}}, \\\n current_default_reindex_parameters)\n person = portal.person_module.newContent(portal_type='Person')\n self.commit()\n # as we specified it in setPlacelessDefaultReindexParameters we should have\n # an activity for this tags\n self.assertEqual(1, portal.portal_activities.countMessageWithTag(tag))\n self.tic()\n self.assertEqual(0, portal.portal_activities.countMessageWithTag(tag))\n\n # restore originals ones\n portal.setPlacelessDefaultReindexParameters(**original_reindex_parameters)\n person = portal.person_module.newContent(portal_type='Person')\n # .. 
now no messages with this tag should appear\n    self.assertEqual(0, portal.portal_activities.countMessageWithTag(tag))\n\n  @for_each_activity\n  def testTryNotificationSavedOnEventLogWhenNotifyUserRaises(self, activity):\n    obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n    self.tic()\n    original_notifyUser = six.get_unbound_function(Message.notifyUser)\n    def failSendingEmail(self, *args, **kw):\n      raise MailHostError('Mail is not sent')\n    activity_unit_test_error = Exception()\n    def failingMethod(self):\n      raise activity_unit_test_error\n    try:\n      Message.notifyUser = failSendingEmail\n      Organisation.failingMethod = failingMethod\n      self._catch_log_errors()\n      obj.activate(activity=activity, priority=6).failingMethod()\n      self.commit()\n      self.flushAllActivities(silent=1, loop_size=100)\n      message, = self.getMessageList(activity)\n      self.commit()\n      for log_record in self.logged:\n        if log_record.name == 'ActivityTool' and log_record.levelname == 'WARNING':\n          type, value, trace = log_record.exc_info\n          self.commit()\n          self.assertIs(activity_unit_test_error, value)\n      self.deleteMessageList(activity, [message])\n    finally:\n      Message.notifyUser = original_notifyUser\n      del Organisation.failingMethod\n      self._ignore_log_errors()\n\n  @for_each_activity\n  def testNotificationFailureIsNotSavedOnEventLogWhenMailNotificationIsDisabled(self, activity):\n    obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n    self.tic()\n    original_notifyUser = six.get_unbound_function(Message.notifyUser)\n    def failSendingEmail(self, *args, **kw):\n      raise MailHostError('Mail is not sent')\n    activity_unit_test_error = Exception()\n    def failingMethod(self):\n      raise activity_unit_test_error\n    try:\n      self.portal.portal_activities.activity_failure_mail_notification = False\n      Message.notifyUser = failSendingEmail\n      Organisation.failingMethod = failingMethod\n      self._catch_log_errors()\n      obj.activate(activity=activity, priority=6).failingMethod()\n      self.commit()\n      self.flushAllActivities(silent=1, loop_size=100)\n      message, = self.getMessageList(activity)\n      self.commit()\n      for log_record in self.logged:\n        if log_record.name == 'ActivityTool' and log_record.levelname == 'WARNING':\n          type, value, trace = log_record.exc_info\n          self.commit()\n          self.assertIs(activity_unit_test_error, value)\n      self.deleteMessageList(activity, [message])\n    finally:\n      self.portal.portal_activities.activity_failure_mail_notification = True\n      Message.notifyUser = original_notifyUser\n      del Organisation.failingMethod\n      self._ignore_log_errors()\n\n  @for_each_activity\n  def testTryUserMessageContainingNoTracebackIsStillSent(self, activity):\n    # With Message.__call__\n    # 1: activity context does not exist when activity is executed\n    obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n    self.tic()\n    notification_done = []\n    def fake_notifyUser(self, *args, **kw):\n      notification_done.append(True)\n      self.traceback = None\n    original_notifyUser = Message.notifyUser\n    def failingMethod(self):\n      raise ValueError(\"This method always fail\")\n    Message.notifyUser = fake_notifyUser\n    Organisation.failingMethod = failingMethod\n    try:\n      obj.activate(activity=activity).failingMethod()\n      self.commit()\n      self.flushAllActivities(silent=1, loop_size=100)\n      message, = self.getMessageList(activity)\n      self.assertEqual(len(notification_done), 1)\n      self.assertEqual(message.traceback, None)\n      message(self.getActivityTool())\n      self.deleteMessageList(activity, [message])\n    finally:\n      Message.notifyUser = 
original_notifyUser\n del Organisation.failingMethod\n\n @for_each_activity\n def testTryNotificationSavedOnEventLogWhenSiteErrorLoggerRaises(self, activity):\n # Make sure that no active object is installed.\n o = self.getOrganisation()\n class ActivityUnitTestError(Exception):\n pass\n activity_unit_test_error = ActivityUnitTestError()\n def failingMethod(self):\n raise activity_unit_test_error\n from Products.SiteErrorLog.SiteErrorLog import SiteErrorLog\n original_raising = six.get_unbound_function(SiteErrorLog.raising)\n\n # Monkey patch Site Error to induce conflict errors artificially.\n def raising(self, info):\n raise AttributeError\n try:\n SiteErrorLog.raising = raising\n Organisation.failingMethod = failingMethod\n self._catch_log_errors()\n o.activate(activity = activity).failingMethod()\n self.commit()\n message, = self.getMessageList(activity)\n self.flushAllActivities(silent = 1)\n SiteErrorLog.raising = original_raising\n self.commit()\n for log_record in self.logged:\n if log_record.name == 'ActivityTool' and log_record.levelname == 'WARNING':\n type, value, trace = log_record.exc_info\n self.assertIs(activity_unit_test_error, value)\n self.deleteMessageList(activity, [message])\n finally:\n SiteErrorLog.raising = original_raising\n del Organisation.failingMethod\n self._ignore_log_errors()\n\n def test_128_CheckDistributeWithSerializationTagAndGroupMethodId(self):\n activity_tool = self.portal.portal_activities\n obj1 = activity_tool.newActiveProcess()\n obj2 = activity_tool.newActiveProcess()\n self.tic()\n group_method_call_list = []\n def doSomething(self, message_list):\n r = []\n for m in message_list:\n m.result = r.append((m.object.getPath(), m.args, m.kw))\n r.sort()\n group_method_call_list.append(r)\n activity_tool.__class__.doSomething = doSomething\n try:\n for activity in ActivityTool.activity_dict:\n activity_kw = dict(activity=activity, serialization_tag=self.id(),\n group_method_id='portal_activities/doSomething')\n obj1.activate(**activity_kw).dummy(1, x=None)\n obj2.activate(**activity_kw).dummy(2, y=None)\n self.commit()\n activity_tool.distribute()\n activity_tool.tic()\n self.assertEqual(group_method_call_list.pop(),\n sorted([(obj1.getPath(), (1,), dict(x=None)),\n (obj2.getPath(), (2,), dict(y=None))]))\n self.assertFalse(group_method_call_list)\n self.assertFalse(activity_tool.getMessageList())\n obj1.activate(priority=2, **activity_kw).dummy1(1, x=None)\n obj1.activate(priority=1, **activity_kw).dummy2(2, y=None)\n message1 = obj1.getPath(), (1,), dict(x=None)\n message2 = obj1.getPath(), (2,), dict(y=None)\n self.commit()\n activity_tool.distribute()\n self.assertEqual(len(activity_tool.getMessageList()), 2)\n activity_tool.tic()\n self.assertEqual(group_method_call_list.pop(),\n [message2] if activity != 'SQLQueue' else [message1, message2])\n self.assertFalse(group_method_call_list)\n finally:\n del activity_tool.__class__.doSomething\n\n def test_129_beforeCommitHook(self):\n \"\"\"\n Check it is possible to activate an object from a before commit hook\n \"\"\"\n def doSomething(person):\n person.activate(activity='SQLDict')._setFirstName('John')\n person.activate(activity='SQLQueue')._setLastName('Smith')\n person = self.portal.person_module.newContent()\n transaction.get().addBeforeCommitHook(doSomething, (person,))\n self.tic()\n self.assertEqual(person.getTitle(), 'John Smith')\n\n def test_connection_migration(self):\n \"\"\"\n Make sure the cmf_activity_sql_connection is automatically migrated from\n the ZMySQLDA Connection class to 
ActivityConnection\n \"\"\"\n # replace the activity connector with a standard ZMySQLDA one\n portal = self.portal\n activity_tool = portal.portal_activities\n stdconn = portal.cmf_activity_sql_connection\n portal._delObject('cmf_activity_sql_connection')\n portal.manage_addProduct['ZMySQLDA'].manage_addZMySQLConnection(\n stdconn.id,\n stdconn.title,\n stdconn.connection_string,\n )\n oldconn = portal.cmf_activity_sql_connection\n self.assertEqual(oldconn.meta_type, 'Z MySQL Database Connection')\n # force rebootstrap and check that migration of the connection happens\n # automatically\n from Products.ERP5Type.dynamic import portal_type_class\n portal_type_class._bootstrapped.clear()\n portal_type_class.synchronizeDynamicModules(activity_tool, True)\n activity_tool.activate(activity='SQLQueue').getId()\n self.tic()\n newconn = portal.cmf_activity_sql_connection\n self.assertEqual(newconn.meta_type, 'CMFActivity Database Connection')\n\n def test_connection_installable(self):\n \"\"\"\n Test if the cmf_activity_sql_connector can be installed\n \"\"\"\n # delete the activity connection\n portal = self.portal\n stdconn = portal.cmf_activity_sql_connection\n portal._delObject('cmf_activity_sql_connection')\n # check the installation form can be rendered\n portal.manage_addProduct['CMFActivity'].connectionAdd(\n portal.REQUEST\n )\n # check it can be installed\n portal.manage_addProduct['CMFActivity'].manage_addActivityConnection(\n stdconn.id,\n stdconn.title,\n stdconn.connection_string\n )\n newconn = portal.cmf_activity_sql_connection\n self.assertEqual(newconn.meta_type, 'CMFActivity Database Connection')\n\n def test_connection_sortkey(self):\n \"\"\"\n Check that SQL connection has properly initialized sort key,\n even when its container (ZODB connection) is reused by another thread.\n \"\"\"\n def sortKey():\n app = ZopeTestCase.app()\n try:\n c = app[self.getPortalName()].cmf_activity_sql_connection()\n return app._p_jar, c.sortKey()\n finally:\n ZopeTestCase.close(app)\n jar, sort_key = sortKey()\n self.assertNotEqual(1, sort_key)\n result = []\n t = threading.Thread(target=lambda: result.extend(sortKey()))\n t.daemon = True\n t.start()\n t.join()\n self.assertIs(result[0], jar)\n self.assertEqual(result[1], sort_key)\n\n def test_onErrorCallback(self):\n activity_tool = self.portal.portal_activities\n obj = activity_tool.newActiveProcess()\n self.tic()\n def _raise(exception): # I wish exceptions are callable raising themselves\n raise exception\n def doSomething(self, conflict_error, cancel):\n self.activity_count += 1\n error = ConflictError() if conflict_error else Exception()\n def onError(exc_type, exc_value, traceback):\n assert exc_value is error\n env = self.getActivityRuntimeEnvironment()\n weakref_list.extend(map(weakref.ref, (env, env._message)))\n self.on_error_count += 1\n return cancel\n self.getActivityRuntimeEnvironment().edit(on_error_callback=onError)\n if not self.on_error_count:\n if not conflict_error:\n raise error\n transaction.get().addBeforeCommitHook(_raise, (error,))\n obj.__class__.doSomething = doSomething\n try:\n for activity in ActivityTool.activity_dict:\n for conflict_error in False, True:\n weakref_list = []\n obj.activity_count = obj.on_error_count = 0\n obj.activate(activity=activity).doSomething(conflict_error, True)\n self.tic()\n self.assertEqual(obj.activity_count, 0)\n self.assertEqual(obj.on_error_count, 1)\n gc.collect()\n self.assertEqual([x() for x in weakref_list], [None, None])\n weakref_list = []\n 
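\n        # Illustrative sketch only (plain Python, not CMFActivity\n        # internals) of the on_error_callback contract this test exercises:\n        # the callback receives the exception info of the failed message and\n        # its boolean result decides whether the message is cancelled (True)\n        # or kept for retry and freezing (False). Names are hypothetical.\n        def handle_failure(on_error_callback, exc_info):\n          if on_error_callback(*exc_info):\n            return 'cancelled'  # activity_count stays at 0, as above\n          return 'kept'  # the message is retried and may end up frozen\n        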
obj.activate(activity=activity).doSomething(conflict_error, False)\n obj.on_error_count = 0\n self.tic()\n self.assertEqual(obj.activity_count, 1)\n self.assertEqual(obj.on_error_count, 1)\n gc.collect()\n self.assertEqual([x() for x in weakref_list], [None, None])\n finally:\n del obj.__class__.doSomething\n\n def test_duplicateGroupedMessage(self):\n activity_tool = self.portal.portal_activities\n obj = activity_tool.newActiveProcess()\n obj.reindexObject(activate_kw={'tag': 'foo', 'after_tag': 'bar'})\n self.commit()\n # Check that both messages were inserted.\n # Also serves as a sanity check on indexation activities group_method_id.\n indexation_group_metdod_id = 'portal_catalog/catalogObjectList'\n self.assertEqual(\n len([\n x\n for x in activity_tool.getMessageList(path=obj.getPath())\n if x.activity_kw.get('group_method_id') == indexation_group_metdod_id\n ]),\n 2,\n )\n invoked = []\n def invokeGroup(self, method_id, message_list, *args):\n # Ignore any other activity which may be spawned from these catalog\n # indexations (ex: fulltext indexations).\n if method_id == indexation_group_metdod_id:\n invoked.append(len(message_list))\n return ActivityTool_invokeGroup(self, method_id, message_list, *args)\n ActivityTool_invokeGroup = activity_tool.__class__.invokeGroup\n try:\n activity_tool.__class__.invokeGroup = invokeGroup\n self.tic()\n finally:\n activity_tool.__class__.invokeGroup = ActivityTool_invokeGroup\n self.assertEqual(invoked, [1])\n\n def test_mergeParent(self):\n category_tool = self.portal.portal_categories\n # Test data: c0\n # / \\\n # c1 c2\n # / \\ |\n # c3 c4 c5\n c = [category_tool.newContent()]\n for i in xrange(5):\n c.append(c[i//2].newContent())\n self.tic()\n def activate(i, priority=1, **kw):\n kw.setdefault('merge_parent', c[0].getPath())\n c[i].activate(priority=priority, **kw).doSomething()\n def check(*expected):\n self.tic()\n self.assertEqual(tuple(invoked), expected)\n del invoked[:]\n invoked = []\n def doSomething(self):\n invoked.append(c.index(self))\n Base.doSomething = doSomething\n try:\n for t in (0, 1), (0, 4, 2), (1, 0, 5), (3, 2, 0):\n for p, i in enumerate(t):\n activate(i, p)\n check(0)\n activate(1, 0); activate(5, 1); check(1, 5)\n activate(3, 0); activate(1, 1); check(1)\n activate(2, 0); activate(1, 1); activate(4, 2); check(2, 1)\n activate(4, 0); activate(5, 1); activate(3, 2); check(4, 5, 3)\n activate(3, 0, merge_parent=c[1].getPath()); activate(0, 1); check(3, 0)\n # Following test shows that a child can be merged with a parent even if\n # 'merge_parent' is not specified. 
This can't be avoided without loading\n # all found duplicates, which would be bad for performance.\n activate(0, 0); activate(4, 1, merge_parent=None); check(0)\n finally:\n del Base.doSomething\n def activate(i, priority=1, **kw):\n c[i].activate(group_method_id='portal_categories/invokeGroup',\n merge_parent=c[(i-1)//2 or i].getPath(),\n priority=priority, **kw).doSomething()\n def invokeGroup(self, message_list):\n r = []\n for m in message_list:\n m.result = r.append(c.index(m.object))\n r.sort()\n invoked.append(r)\n category_tool.__class__.invokeGroup = invokeGroup\n try:\n activate(5, 0); activate(1, 1); check([1, 5])\n activate(4, 0); activate(1, 1); activate(2, 0); check([1, 2])\n activate(1, 0); activate(5, 0); activate(3, 1); check([1, 5])\n for p, i in enumerate((5, 3, 2, 1, 4)):\n activate(i, p, group_id=str(2 != i != 5))\n check([2], [1])\n for cost in 0.3, 0.1:\n activate(2, 0, group_method_cost=cost)\n activate(3, 1); activate(4, 2); activate(1, 3)\n check([1, 2])\n finally:\n del category_tool.__class__.invokeGroup\n category_tool._delObject(c[0].getId())\n self.tic()\n\n def test_getMessageList(self):\n activity_tool = self.portal.portal_activities\n module = self.portal.person_module\n module.activate(after_tag=\"foo\").getUid()\n module.activate(activity='SQLQueue', tag=\"foo\").getId()\n activity_tool.activate(priority=-1).getId()\n def check(expected, **kw):\n self.assertEqual(expected, len(activity_tool.getMessageList(**kw)))\n def test(check=lambda _, **kw: check(0, **kw)):\n check(2, path=module.getPath())\n check(3, method_id=(\"getId\", \"getUid\"))\n check(1, tag=\"foo\")\n check(0, tag=\"foo\", method_id=\"getUid\")\n check(1, processing_node=-1)\n check(3, processing_node=range(-5,5))\n test()\n self.commit()\n test(check)\n self.tic()\n test()\n\n def test_MessageNonExecutable(self):\n message_list = self.portal.MailHost._message_list\n del message_list[:]\n activity_tool = self.portal.portal_activities\n\n kw = {}\n self._catch_log_errors()\n try:\n activity_tool.activity_failure_mail_notification = True\n for kw['activity'] in ActivityTool.activity_dict:\n for kw['group_method_id'] in '', None:\n obj = activity_tool.newActiveProcess()\n self.tic()\n obj.activate(**kw).getId()\n activity_tool._delOb(obj.getId())\n obj = activity_tool.newActiveProcess(id=obj.getId(),\n is_indexable=False)\n self.commit()\n self.assertEqual(1, activity_tool.countMessage())\n self.flushAllActivities()\n sender, recipients, mail = message_list.pop()\n self.assertIn('UID mismatch', mail)\n m, = activity_tool.getMessageList()\n self.assertEqual(m.processing_node, INVOKE_ERROR_STATE)\n obj.flushActivity()\n obj.activate(**kw).getId()\n activity_tool._delOb(obj.getId())\n self.commit()\n self.assertEqual(1, activity_tool.countMessage())\n activity_tool.tic()\n cmf_activty_log, = [log for log in self.logged if 'CMFActivity' in log.name]\n self.logged = []\n self.assertIn('no object found', cmf_activty_log.getMessage())\n finally:\n self._ignore_log_errors()\n self.assertFalse(message_list)\n\n def test_activateByPath(self):\n organisation = self.getOrganisation()\n self.portal.portal_activities.activateObject(\n organisation.getPath(),\n activity='SQLDict',\n active_process=None\n ).getTitle()\n self.tic()\n\n def test_activateOnZsqlBrain(self):\n organisation, = self.getOrganisationModule().searchFolder(\n id=self.company_id)\n organisation.activate().getTitle()\n self.tic()\n\n def test_flushActivitiesOnDelete(self):\n organisation = self.getOrganisation()\n 
organisation.getParentValue()._delObject(organisation.getId())\n organisation.activate().getTitle()\n self.tic()\n\n def test_flushActivitiesOnDeleteWithAcquierableObject(self):\n # Create an object with the same ID that can be acquired\n self.portal._setObject(self.company_id, Organisation(self.company_id))\n\n organisation = self.getOrganisation()\n organisation.getParentValue()._delObject(organisation.getId())\n organisation.reindexObject()\n self.tic()\n\n def test_failingGroupMethod(self):\n activity_tool = self.portal.portal_activities\n obj = activity_tool.newActiveProcess()\n self.tic()\n obj.x = 1\n def doSomething(self):\n self.x %= self.x\n obj.__class__.doSomething = doSomething\n try:\n activity_kw = dict(activity=\"SQLQueue\", group_method_id=None)\n obj.activate(**activity_kw).doSomething()\n obj.activate(**activity_kw).doSomething()\n obj.activate(**activity_kw).doSomething()\n self.commit()\n self.assertEqual(3, len(activity_tool.getMessageList()))\n activity_tool.tic()\n self.assertEqual(obj.x, 0)\n skipped, failed = activity_tool.getMessageList()\n self.assertEqual(0, skipped.retry)\n self.assertEqual(1, failed.retry)\n obj.x = 1\n self.commit()\n activity_tool.timeShift(VALIDATION_ERROR_DELAY)\n activity_tool.tic()\n m, = activity_tool.getMessageList()\n self.assertEqual(1, failed.retry)\n obj.x = 1\n self.commit()\n activity_tool.timeShift(VALIDATION_ERROR_DELAY)\n activity_tool.tic()\n finally:\n del obj.__class__.doSomething\n\n def test_restrictedGroupMethod(self):\n skin = self.portal.portal_skins.custom\n script_id = self.id()\n script = createZODBPythonScript(skin, script_id, \"message_list\", \"\"\"if 1:\n for m in message_list:\n m.result = m.object.getProperty(*m.args, **m.kw)\n \"\"\")\n script.manage_proxy((\"Manager\",))\n obj = self.portal.portal_activities.newActiveProcess(causality_value_list=(\n self.portal.person_module, self.portal.organisation_module))\n obj.manage_permission('Access contents information', ['Manager'])\n self.logout()\n foo = obj.activate(activity='SQLQueue',\n group_method_id=script_id,\n active_process=obj.getPath()).foo\n foo('causality', portal_type='Organisation Module')\n foo('stop_date', 'bar')\n self.tic()\n self.assertEqual(sorted(x.getResult() for x in obj.getResultList()),\n ['bar', 'organisation_module'])\n skin.manage_delObjects([script_id])\n self.tic()\n\n def test_getCurrentNode(self):\n current_node = getattr(getConfiguration(), 'product_config', {}) \\\n .get('cmfactivity', {}).get('node-id')\n if not current_node:\n current_node = getServerAddress()\n node = getCurrentNode()\n self.assertEqual(node, current_node)\n activity_node = self.portal.portal_activities.getCurrentNode()\n self.assertEqual(activity_node, current_node)\n\n def test_getServerAddress(self):\n host, port = self.startHTTPServer()\n ip = socket.gethostbyname(host)\n server_address = '%s:%s' % (ip, port)\n address = getServerAddress()\n self.assertEqual(address, server_address)\n activity_address = self.portal.portal_activities.getServerAddress()\n self.assertEqual(activity_address, server_address)\n\n def test_nodePreference(self):\n \"\"\"\n Test node preference, i.e. 
'node' parameter of activate()\n An object is activated by 2 different nodes and the 2 messages are\n processed by the node that created the newest one:\n - without node preference: they're ordered by date\n - with node preference: they're executed in reverse order (the\n processing node executes its message first even if it's newer)\n Correct ordering of queues is also checked, by including scenarios\n in which one message is in SQLDict and the other in SQLQueue.\n \"\"\"\n activity_tool = self.portal.portal_activities\n o = self.getOrganisation()\n\n node_dict = dict(activity_tool.getNodeDict())\n assert len(node_dict) == 1 and '' not in node_dict, node_dict\n before = DateTime() - 1\n\n activities = 'SQLDict', 'SQLQueue'\n for activities in product(activities, activities):\n for node, expected in (None, '12'), ('', '21'), ('same', '12'):\n o._setTitle('0')\n # The dance around getNodeDict is to simulate the creation of\n # activities from 2 different nodes. We also change title in 2\n # different ways, so that SQLDict does not merge them.\n o.activate(activity=activities[0], node=node)._setTitle('1')\n activity_tool.getNodeDict = lambda: node_dict\n node_dict[''] = ActivityTool.ROLE_PROCESSING\n o.activate(activity=activities[1], node=node, at_date=before\n )._setProperty('title', '2')\n del node_dict['']\n activity_tool._p_invalidate()\n self.commit()\n\n for title in expected:\n self.ticOnce()\n self.assertEqual(o.getTitle(), title, (activities, expected))\n self.assertFalse(activity_tool.getMessageList())\n\n def test_nodeFamilies(self):\n \"\"\"\n Test node families, i.e. 'node' parameter of activate() beyond \"\", \"same\"\n and None.\n \"\"\"\n activity_tool = self.portal.portal_activities\n node_id, = activity_tool.getNodeDict()\n other = 'boo'\n member = 'foo'\n non_member = 'bar'\n does_not_exist = 'baz'\n\n # Family declaration API\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [])\n self.assertRaises(\n ValueError,\n activity_tool.createFamily, 'same', # Reserved name\n )\n self.assertRaises(\n TypeError,\n activity_tool.createFamily, -5, # Not a string\n )\n activity_tool.createFamily(other)\n self.assertRaises(\n ValueError,\n activity_tool.createFamily, other, # Exists\n )\n activity_tool.createFamily(member)\n self.assertRaises(\n ValueError,\n activity_tool.renameFamily, other, member, # New name exists\n )\n self.assertRaises(\n ValueError,\n activity_tool.renameFamily, does_not_exist, member, # Old name does not exist\n )\n self.assertRaises(\n TypeError,\n activity_tool.renameFamily, other, -4, # New name not a string\n )\n activity_tool.deleteFamily(member)\n # Silent success\n activity_tool.deleteFamily(member)\n activity_tool.createFamily(non_member)\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [other, non_member])\n\n # API for node a-/di-ssociation with/from families\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [])\n activity_tool.addNodeToFamily(node_id, other)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other])\n # Silent success\n activity_tool.addNodeToFamily(node_id, other)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other])\n activity_tool.addNodeToFamily(node_id, non_member)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other, non_member])\n activity_tool.removeNodeFromFamily(node_id, non_member)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other])\n # Silent success\n 
activity_tool.removeNodeFromFamily(node_id, non_member)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other])\n activity_tool.createFamily(does_not_exist)\n activity_tool.addNodeToFamily(node_id, does_not_exist)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other, does_not_exist])\n activity_tool.deleteFamily(does_not_exist)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other])\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [other, non_member])\n activity_tool.renameFamily(other, member)\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [member, non_member])\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [member])\n activity_tool.createFamily(other)\n activity_tool.addNodeToFamily(node_id, other)\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [member, non_member, other])\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [member, other])\n activity_tool.deleteFamily(other)\n\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [member, non_member])\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [member])\n o = self.getOrganisation()\n for activity in 'SQLDict', 'SQLQueue':\n # Sanity check.\n self.assertEqual(self.getMessageList(activity), [])\n self.assertRaises(\n ValueError,\n o.activate, activity=activity, node=does_not_exist,\n )\n for node, expected in (member, '1'), (non_member, '0'), ('', '1'), ('same', '1'):\n o._setTitle('0')\n o.activate(activity=activity, node=node)._setTitle('1')\n self.commit()\n self.ticOnce()\n self.assertEqual(\n o.getTitle(),\n expected,\n (activity, o.getTitle(), expected),\n )\n if expected == '0':\n # The activity must still exist, waiting for a node of the\n # appropriate family.\n result = self.getMessageList(activity)\n self.assertEqual(len(result), 1)\n self.deleteMessageList(activity, result)\n\n def test_message_auto_validation(self):\n \"\"\"\n Test that messages without dependencies are directly spawned with\n processing_node=0.\n \"\"\"\n organisation = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n activity_tool = self.getActivityTool()\n organisation.activate(tag='1').getId()\n organisation.activate(tag='2', after_tag=None).getId()\n organisation.activate(tag='3', after_tag='foo').getId()\n self.commit()\n activity_tool.getMessageList()\n self.assertItemsEqual(\n [('1', 0), ('2', 0), ('3', -1)],\n [\n (x.activity_kw['tag'], x.processing_node)\n for x in self.getActivityTool().getMessageList()\n ],\n )\n self.tic()\n\n def test_activity_timeout(self):\n slow_method_id = 'Base_getSlowObjectList'\n createZODBPythonScript(\n self.portal.portal_skins.custom,\n slow_method_id,\n 'selection=None, **kw',\n \"\"\"\nfrom time import sleep\nsleep(3)\nreturn [x.getObject() for x in context.portal_catalog(limit=100)]\n \"\"\")\n\n # Set short enough activity timeout configuration\n import Products.ERP5Type.Timeout\n Products.ERP5Type.Timeout.activity_timeout = 2.0\n\n self.portal.portal_templates.activate().Base_getSlowObjectList()\n with self.assertRaises(RuntimeError):\n self.tic()\n message, = self.getMessageList('SQLDict')\n self.assertEqual(message.retry, 0)\n self.deleteMessageList(\n 'SQLDict',\n [message],\n )\n\n def test_zmi_views(self):\n # we can render ZMI view without errors or warnings\n with warnings.catch_warnings(record=True) as catched_warnings:\n self.portal.portal_activities.manage_overview()\n 
self.portal.portal_activities.manageActivities()\n self.portal.portal_activities.manageActivitiesAdvanced()\n self.portal.portal_activities.manageLoadBalancing()\n self.assertEqual(catched_warnings, [])\n\n @for_each_activity\n def testSpawnTimeUserGroupAndRoleUsedDuringExecution(self, activity):\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n # This user cannot be created by userfolder API, validating that activity\n # execution does not use it.\n # Using a PropertiedUser because it is the lowest-level class which has a\n # groups notion.\n artificial_user = PropertiedUser(\n id='this user does not exist',\n login='does not matter',\n ).__of__(self.portal.acl_users)\n artificial_user._addGroups(groups=('group 1', 'group 2'))\n artificial_user._addRoles(roles=('role 1', 'role 2'))\n initial_security_manager = getSecurityManager()\n def checkUserGroupAndRole(organisation_self):\n user = getSecurityManager().getUser()\n self.assertIs(type(aq_base(user)), PropertiedUser)\n self.assertEqual(aq_parent(user), aq_parent(artificial_user))\n self.assertEqual(user.getId(), artificial_user.getId())\n self.assertItemsEqual(user.getGroups(), artificial_user.getGroups())\n self.assertItemsEqual(user.getRoles(), artificial_user.getRoles())\n Organisation.checkUserGroupAndRole = checkUserGroupAndRole\n try:\n newSecurityManager(None, artificial_user)\n obj.activate(activity=activity).checkUserGroupAndRole()\n self.tic()\n finally:\n setSecurityManager(initial_security_manager)\n del Organisation.checkUserGroupAndRole\n\n @for_each_activity\n def test_dummyGroupMethodUser(self, activity):\n activity_tool = self.portal.portal_activities\n user_folder = self.portal.acl_users\n expected_user_list = [\n PropertiedUser(id='user1', login='user1').__of__(user_folder),\n PropertiedUser(id='user2', login='user2').__of__(user_folder),\n ]\n for index, user in enumerate(expected_user_list):\n user._addGroups(groups=['role %i' % index])\n context_list = [\n self.portal.organisation_module.newContent(portal_type='Organisation')\n for _ in expected_user_list\n ]\n self.tic()\n user_list = [None for _ in expected_user_list]\n def doSomething(self, index):\n user_list[index] = getSecurityManager().getUser()\n Organisation.doSomething = doSomething\n try:\n initial_security_manager = getSecurityManager()\n try:\n for index, (context, user) in enumerate(zip(\n context_list,\n expected_user_list,\n )):\n newSecurityManager(None, user)\n context.activate(\n activity=activity,\n group_method_id=None,\n ).doSomething(index=index)\n finally:\n setSecurityManager(initial_security_manager)\n self.tic()\n finally:\n del Organisation.doSomething\n self.assertEqual(\n [x.getRoles() for x in user_list],\n [x.getRoles() for x in expected_user_list],\n )\n","repo_name":"Nexedi/erp5","sub_path":"product/CMFActivity/tests/testCMFActivity.py","file_name":"testCMFActivity.py","file_ext":"py","file_size_in_byte":113448,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"}
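The test file above exercises CMFActivity's deferred-execution API at length. As a compact reference, here is the core activate/tic pattern the tests keep repeating, distilled only from calls that appear in the record (activate with activity/tag/after_tag keywords, distribute, tic, newContent). This is a sketch that assumes a live ERP5/Zope instance with a bound `portal` object; it is not runnable standalone.

```python
# Sketch of the CMFActivity pattern used throughout the tests above.
# Assumes an ERP5/Zope context where `portal` is bound; all names here
# (activate, distribute, tic, tag/after_tag) come from the source file.
import transaction

obj = portal.organisation_module.newContent(portal_type='Organisation')

# Queue two deferred calls; the second runs only after everything
# tagged 'index' has been processed.
obj.activate(activity='SQLDict', tag='index').getId()
obj.activate(activity='SQLQueue', after_tag='index').getTitle()

transaction.commit()                   # activity messages become visible
portal.portal_activities.distribute()  # validate and assign messages
portal.portal_activities.tic()         # execute pending activities
```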
+{"seq_id":"14066948142","text":"# link: https://leetcode.com/problems/my-calendar-iii/\n# solution reference: https://leetcode.com/problems/my-calendar-iii/solution/\n\n# Sweep line algorithm\nfrom sortedcontainers import SortedDict\nclass MyCalendarThree:\n\n def __init__(self):\n self.diff = SortedDict()\n\n def book(self, start: int, end: int) -> int:\n self.diff[start] = self.diff.get(start, 0) + 1\n self.diff[end] = self.diff.get(end, 0) - 1\n\n cur = max_count = 0\n # to cummulate the count, the order matters here => use SortedDict\n for key in self.diff:\n cur += self.diff[key]\n max_count = max(max_count, cur)\n\n return max_count\n\n# Your MyCalendarThree object will be instantiated and called as such:\n# obj = MyCalendarThree()\n# param_1 = obj.book(start,end)\n","repo_name":"rbrn1999/leetcode-sol","sub_path":"problems/732. My Calendar III.py","file_name":"732. My Calendar III.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"32854228101","text":"import glob\nimport pandas as pd\nimport json\nfrom datetime import datetime\nimport os\nimport re\n\ndef normalize_tweet(tweet):\n if isinstance(tweet, str):\n tweet = tweet.lower()\n tweet = re.sub(r\"@[^\\s]+\", \"@user\", tweet) # Replace mentions with \"@user\"\n tweet = re.sub(r\"https?://[^\\s]+\", \"link\", tweet) # Replace URLs with \"link\"\n tweet = \" \".join(tweet.split()) # Remove extra whitespace\n else:\n tweet = str(tweet)\n\n return tweet\n\ndef process_files(folder_path, output_folder):\n # Retrieve file paths from the folder\n file_paths = glob.glob(folder_path)\n\n # Iterate over the file paths\n for file_path in file_paths:\n # Read the CSV file into a DataFrame\n df = pd.read_csv(file_path)\n\n # Apply the normalization function to each tweet\n df['normalized_tweet'] = df['full_text'].apply(normalize_tweet)\n\n # Print the first normalized tweet for demonstration\n print(df['normalized_tweet'][0])\n\n # Drop duplicates based on all columns\n df.drop_duplicates(keep='first', inplace=True)\n\n # Change the name of the file from \"staged\" to \"trusted\"\n file_name = os.path.basename(file_path)\n new_file_name = file_name.replace(\"staged\", \"trusted\")\n output_file = os.path.join(output_folder, new_file_name)\n\n # Save the DataFrame to a new CSV file\n df.to_csv(output_file, index=False)\n\ndef run_normalization(folder_path, output_folder):\n normalize_tweet(None)\n process_files(folder_path, output_folder)","repo_name":"Silly-Machine/twitter-data-engineering","sub_path":"src/data_normalization.py","file_name":"data_normalization.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"1098640982","text":"#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import Vector3Stamped\nfrom std_msgs.msg import Float64\nfrom wind_sensor.wind_sensor import WindSensor\nimport subprocess as sp\n\n\nclass WindSensorTalker:\n def __init__(self):\n \"\"\"\n Initializes the WindSensor class that is defined in wind_sensor.py\n \"\"\"\n self.wnd = WindSensor()\n\n def talker(self):\n \"\"\"\n Initializes the 4 publishers(wind_pub, rpy_pub, temp_pub, battery_pub), then initializes the node\n as wind_sensor_node and sets the rate to the users defined value(8 is the max of the wind sensor) then sleeps to\n let the startup and bluetooth connection finish.\n\n Makes function calls to gather the sensor readings from the wind sensor and saves the to the correct formats for\n publishing with a check at the end that the bluetooth connectivity is still up and if it is not the connection\n is reset and a small sleep to wait for it to establish before continuing.\n \"\"\"\n wind_pub = rospy.Publisher('wind_sensor/wind_vector', Vector3Stamped, queue_size=10)\n rpy_pub = rospy.Publisher('wind_sensor/roll_pitch_yaw', Vector3Stamped, queue_size=10)\n temp_pub = rospy.Publisher('wind_sensor/temperature', Float64, queue_size=10)\n battery_pub = rospy.Publisher('wind_sensor/battery_voltage', Float64, queue_size=10)\n rospy.init_node('wind_sensor_node', anonymous=True, log_level=rospy.get_param(\"log_level\", rospy.INFO))\n rate = rospy.Rate(8) # refresh rate in hz\n rospy.sleep(5)\n while not rospy.is_shutdown():\n self.wnd.update()\n wind_vector = self.wnd.get_wind_vector()\n vec_msg = Vector3Stamped()\n vec_msg.header.stamp = rospy.Time.now()\n vec_msg.vector.x = -wind_vector[0]\n vec_msg.vector.y = -wind_vector[1]\n vec_msg.vector.z = 0\n\n rpy_vector = self.wnd.get_rpy()\n rpy_msg = Vector3Stamped()\n rpy_msg.header.stamp = rospy.Time.now()\n rpy_msg.vector.x = -rpy_vector[0]\n rpy_msg.vector.y = rpy_vector[1]\n rpy_msg.vector.z = -rpy_vector[2]\n\n battery_msg = Float64()\n battery_msg = self.wnd.get_battery_charge()\n\n temp_msg = Float64()\n temp_msg = self.wnd.get_temp()\n temp_msg -= 273.15 # convert to celsius from kelvin\n stdoutdata = sp.getoutput(\"hcitool con\")\n if \"DC:73:74:12:94:80\" not in stdoutdata.split():\n rospy.logerr(\"Connection Failed, Reconnecting!\")\n self.wnd.close()\n self.wnd = WindSensor()\n rospy.sleep(5)\n wind_pub.publish(vec_msg)\n rpy_pub.publish(rpy_msg)\n battery_pub.publish(battery_msg)\n temp_pub.publish(temp_msg)\n rate.sleep()\n\n\nif __name__ == '__main__':\n wnd = WindSensorTalker()\n try:\n wnd.talker()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"AutoSail-MDH/AutoSailROS","sub_path":"autosail/scripts/drivers/wind_sensor_node.py","file_name":"wind_sensor_node.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"41588407926","text":"plotfilename=\"../files/matplotlib-3d-example.png\"\ninfile = \"../files/genfromtxt_example_data.txt\"\noufile = \"../files/genfromtxt_example_plot.png\"\nimport numpy\nimport matplotlib\nimport matplotlib.pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef randrange(n, vmin, vmax):\n return (vmax - vmin)*numpy.random.rand(n) + vmin\n\ndata = numpy.genfromtxt(infile, comments=\"#\", delimiter=\"\\t\", skip_header=3)\nfig = matplotlib.pyplot.figure()\nax = fig.add_subplot(111, projection='3d')\nn = data.shape[0]\n# plot a sphere for each particle\n# colour charged particles red (charge>0), blue (charge<0) and neutrals green\nblues = data[data[:,7]<0]\nreds = data[data[:,7]>0]\ngreens=data[numpy.logical_not(numpy.logical_or(data[:,7]<0,data[:,7]>0))]\nax.scatter(blues[:,0], blues[:,1], blues[:,2], c=\"b\", edgecolors=\"face\",\n marker=\"o\", s=blues[:,6])\nax.scatter(reds[:,0], reds[:,1], reds[:,2], c=\"r\", edgecolors=\"face\",\n marker=\"o\", s=greens[:,6])\nax.scatter(greens[:,0], greens[:,1], greens[:,2], c=\"g\", edgecolors=\"face\",\n marker=\"o\", s=greens[:,6])\nax.quiver(blues[:,0], blues[:,1], blues[:,2], blues[:,3], blues[:,4],\n blues[:,5], pivot=\"tail\")\nax.quiver(reds[:,0], reds[:,1], reds[:,2], reds[:,3], reds[:,4],\n reds[:,5], pivot=\"middle\")\nax.quiver(greens[:,0], greens[:,1], greens[:,2], greens[:,3], greens[:,4],\n greens[:,5], pivot=\"tip\")\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z Label')\nmatplotlib.pyplot.savefig(oufile)\nprint(oufile, end=\"\")\n","repo_name":"COSMOS-CTC-Cambridge/damtp-research-programming","sub_path":"codes/python/genfromtxt_example_plot.py","file_name":"genfromtxt_example_plot.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"}
+{"seq_id":"14710490296","text":"from typing import Sequence, Union, List, Any\nfrom nornir.core.task import Result, Task\nfrom nornir_netmiko.connections import CONNECTION_NAME\n\n\ndef netmiko_multiline(\n task: Task,\n commands: Sequence[Union[str, List[str]]],\n use_timing: bool = False,\n enable: bool = False,\n **kwargs: Any\n) -> Result:\n \"\"\"\n Execute Netmiko send_multiline method (or send_multiline_timing)\n\n Arguments:\n commands: List or list of lists (see Netmiko send_multiline)\n use_timing: Set to True to switch to send_multiline_timing method.\n enable: Set to True to force Netmiko .enable() call.\n kwargs: Additional arguments to pass to send_multiline method.\n\n Returns:\n Result object with the following attributes set:\n * result: String result showing you the output from commands\n \"\"\"\n net_connect = task.host.get_connection(CONNECTION_NAME, task.nornir.config)\n if enable:\n net_connect.enable()\n if use_timing:\n result = net_connect.send_multiline_timing(commands, **kwargs)\n else:\n result = net_connect.send_multiline(commands, **kwargs)\n return Result(host=task.host, result=result)\n","repo_name":"ktbyers/nornir_netmiko","sub_path":"nornir_netmiko/tasks/netmiko_multiline.py","file_name":"netmiko_multiline.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"75"}
+{"seq_id":"17046472764","text":"import datetime\nimport random\nimport json\nimport mutagen\nimport os\n\nclass Song:\n\n\tdef __init__(self, title, artist, album, length):\n\t\tself.title = title\n\t\tself.artist = artist\n\t\tself.album = album\n\t\tself.length = length\n\n\t@property\n\tdef is_valid_length(self):\n\t\ti = 0\n\t\twhile i < len(self.length):\n\t\t\tif not self.length[i].isdigit() and not self.length[i] == ':':\n\t\t\t\t\treturn False\n\t\t\ti += 1\n\n\t\tarray = self.length.split(':')\n\t\ti = 0\n\t\twhile i < len(array):\n\t\t\tif array[i] < '0' or array[i] > '59':\n\t\t\t\treturn False\n\t\t\ti += 1\n\t\tif len(array) == 2:\n\t\t\tif int(array[0]) >= 0 and int(array[1]) > 0 and int(array[1]) < 60:\n\t\t\t\t\treturn True\n\n\t\tif len(array) == 3:\n\t\t\tif int(array[0]) > 0 and int(array[1]) > 0 and int(array[1]) < 60 and int(array[2]) > 0 and int(array[2]) < 60:\n\t\t\t\t\treturn True\n\n\tdef __str__(self):\n\t\tif self.is_valid_length:\n\t\t\treturn \"{} {} '{}' {}\".format(self.title, self.artist, self.album, self.length)\n\t\traise Exception(\"Invalid length !!!\")\n\n\tdef __repr__(self):\n\t\treturn str(self)\n\n\tdef __eq__(self, other):\n\t\treturn self.title == other.title and self.artist == other.artist and self.album == other.album and self.length == other.length\n\n\tdef __hash__(self):\n\t\treturn hash((self.title, self.artist, self.album, self.length))\n\t\n\tdef get_length(self, seconds = False, minutes = False, hours = False):\n\t\ttime_parts = self.length.split(':')\n\t\tif len(time_parts) == 2:\n\t\t\tlength_in_seconds = int(time_parts[1]) + int(time_parts[0])*60\n\t\t\tlength_in_minutes = int(time_parts[0])\n\t\t\tlength_in_hours = 0\n\t\tif len(time_parts) == 3:\n\t\t\tlength_in_hours = int(time_parts[0])\n\t\t\tlength_in_minutes = int(time_parts[0])*60 + int(time_parts[1])\n\t\t\tlength_in_seconds = int(time_parts[2]) + int(length_in_minutes*60)\n\t\tif seconds:\n\t\t\treturn length_in_seconds\n\t\tif minutes:\n\t\t\treturn length_in_minutes\n\t\tif hours:\n\t\t\treturn length_in_hours\n\t\telse:\n\t\t\treturn(str(self.length))\n\nclass Playlist:\n\n\tdef __init__(self, name, repeat=False, shuffle=False):\n\t\tself.name = name\n\t\tself.repeat = repeat\n\t\tself.shuffle = shuffle\n\t\tself.songs = []\n\t\tself.songs_location = {}\n\t\tself.song_ind = 0\n\n\tdef add_song(self, song):\n\t\tif isinstance(song, Song):\n\t\t\tself.songs.append(song)\n\t\treturn self.songs\n\n\tdef remove_song(self, song):\n\t\ti = 0\n\t\twhile self.songs[i] != song:\n\t\t\ti += 1\n\t\tj = i\n\t\twhile j < len(self.songs)-1:\n\t\t\tself.songs[j]=self.songs[j+1]\n\t\t\tj += 1\n\n\t\tself.songs = self.songs[0:len(self.songs)-1]\n\t\treturn self.songs\n\n\tdef total_length(self):\n\t\ttotal = \"\"\n\t\thours = 0\n\t\tminutes = 0\n\t\tseconds = 0\n\t\tfor i in range(0, len(self.songs)):\n\t\t\tcurrent_time_array = self.songs[i].length.split(':')\n\t\t\tif len(current_time_array)==2:\n\t\t\t\tminutes += int(current_time_array[0])\n\t\t\t\tseconds += int(current_time_array[1])\n\t\t\tif len(current_time_array)==3:\n\t\t\t\thours += int(current_time_array[0])\n\t\t\t\tminutes += int(current_time_array[1])\n\t\t\t\tseconds += int(current_time_array[2])\n\t\ts = seconds % 60\n\t\tnew_m = (seconds - s)//60\n\t\tminutes += new_m\n\t\tm = minutes % 60\n\t\tnew_h = (minutes - m)//60\n\t\thours += new_h\n\t\tif hours > 0:\n\t\t\ttotal += str(hours) + \":\"\n\t\ttotal += str(m) + \":\" + str(s)\n\n\t\treturn total\n\n\tdef artists(self):\n\t\tdata = []\n\t\tfor i in range(0, 
len(self.songs)):\n\t\t\tartist = self.songs[i].artist\n\t\t\tsong = self.songs[i].title\n\t\t\tdata.append((artist, song))\n\t\thist = {}\n\t\tfor i in data:\n\t\t\thist[i] = hist.get(i,0)+1\n\t\tresult_hist = {}\n\t\tfor i in hist:\n\t\t\tresult_hist[i[0]] = hist[(i[0],i[1])]\n\n\t\treturn result_hist\n\n\tdef next_song(self):\n\t\tif self.song_ind == len(self.songs) - 1:\n\t\t\tif self.repeat == True:\n\t\t\t\tself.song_ind = 0\n\n\t\tif self.shuffle == True:\n\t\t\tself.song_ind = random.randint(0, len(self.songs)-1)\n\t\t\t\n\t\treturn self.songs[self.song_ind]\n\n\n\tdef print_playlist(self):\n\t\tprint(\"| \", \"Artist \", \" | \", \" Song \", \" | \", \" Length \", \" |\" )\n\t\tprint(\"|\", \"--------\", \" | \", \"------------------\", \" | \", \"--------\", \" |\" )\n\t\tfor i in range(0,len(self.songs)):\n\t\t\tres = \"| \" + str(self.songs[i].artist)+ \" | \"\n\t\t\ttitle_space = len(\"------------------\")- len(str(self.songs[i].title))\n\t\t\tres += str(self.songs[i].title)\n\t\t\twhile title_space > 0:\n\t\t\t\tres += \" \"\n\t\t\t\ttitle_space -= 1\n\t\t\tres += \" | \"\n\n\t\t\ttitle_space = len(\"-------- \")- len(str(self.songs[i].length))\n\t\t\tres += str(self.songs[i].length)\n\t\t\twhile title_space > 0:\n\t\t\t\tres += \" \"\n\t\t\t\ttitle_space -= 1\n\t\t\tres += \" |\"\n\n\t\t\tprint(res)\n\n\n\tdef save(self):\n\t\td = {}\n\t\tfor i in range(0, len(self.songs)):\n\t\t\tdata = {}\n\t\t\tdata[\"title\"] = self.songs[i].title\n\t\t\tdata[\"artist\"] = self.songs[i].artist\n\t\t\tdata[\"album\"] = self.songs[i].album\n\t\t\tdata[\"length\"] = self.songs[i].length\n\t\t\td[i] = data\n\t\t\n\t\tfilename = \"\"\n\t\tfor i in range(0, len(self.name)):\n\t\t\tif self.name[i]==\" \":\n\t\t\t\tfilename += \"-\"\n\t\t\telse:\n\t\t\t\tfilename += self.name[i]\n\t\tfilename += \".json\"\n\t\twith open(filename,'w') as f:\n\t\t\tjson.dump(d,f)\n\t\treturn filename\n\n\t@classmethod\n\tdef load(file_name):\n\t\twith open(file_name, 'r') as f:\n\t\t\tcontent = json.load(f)\n\t\t\tplaylist = Playlist(content[\"name\"])\n\t\t\tfor song in content[\"songs\"]:\n\t\t\t\tnew_song = Song(\n\t\t\t\t\tartist=song[\"artist\"], title=song[\"title\"], album=song[\"album\"], length=song[\"length\"])\n\t\t\t\tplaylist.add_song(new_song)\n\t\t\treturn playlist\n\n\tdef add_location(self, song, location):\n\t\tself.songs_location[song] = location\n\n\nclass MusicCrawler:\n\n\tdef __init__(self, path):\n\t\tself.path = path\n\n\tdef get_info(self, data):\n\t\tsong_data = {}\n\t\tsong_data[\"artist\"] = data.get([\"ARTIST\"].text[0], \"Unknown\")\n\t\tsong_data[\"album\"] = data.get([\"ALBUM\"].text[0], \"Unknown\")\n\t\tsong_data[\"title\"] = data.get([\"TITLE\"].text[0], \"Unknown\")\n\t\ttry:\n\t\t\tsong_data[\"length\"] = str(\n\t\t\t\tdatetime.timedelta(seconds=data.info.length//1))[2:]\n\t\texcept:\n\t\t\tsong_data[\"length\"] = \"Unknown\"\n\t\treturn song_data\n\n\tdef generate_playlist(self, name):\n\t\tplaylist = Playlist(name)\n\t\tsongs = [mp3 for mp3 in os.listdir(self.path) if mp3.endswith(\".mp3\")]\n\t\tfor song in songs:\n\t\t\tdata = mutagen.File(self.path + \"/\" + song)\n\t\t\tinfo = self.get_info(data)\n\t\t\tnew_song = Song(\n\t\t\t\tartist=info[\"artist\"], title=info[\"title\"], album=info[\"album\"], length=info[\"length\"])\n\t\t\tplaylist.add_song(new_song)\n\t\t\tplaylist.add_location(new_song, self.path + \"/\" + song)\n\t\treturn 
playlist\n","repo_name":"angelavelinova/Programming-101","sub_path":"week05/03.MusicLibrary/music_library.py","file_name":"music_library.py","file_ext":"py","file_size_in_byte":6131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"29240514382","text":"#!/usr/bin/python3\n\nimport json\nimport os\nimport shutil\nimport tempfile\nimport subprocess\nfrom optparse import OptionParser\n\ndef get_model_machies():\n raw = subprocess.check_output('juju machines --format json',shell=True)\n ret = []\n json_machines = json.loads(raw)\n for id in json_machines['machines']:\n name = json_machines['machines'][id]['display-name']\n ret.append( (id,name) )\n return ret\n\ndef write_collector_script(install_lldp = True):\n ftmp, ftmpname = tempfile.mkstemp()\n header = \"#!/bin/bash\\n\"\n install = \"\"\"\n for interface in `ls /sys/kernel/debug/i40e`\n do echo \"lldp stop\" > /sys/kernel/debug/i40e/${interface}/command\n done\n apt install lldpd -y;\n \"\"\"\n collect = \"lldpcli show neighbors details -f json > /tmp/lldp_output.json\\n\"\n body = header\n if install_lldp:\n body += install\n body += \"\\n\"\n body += collect\n os.write(ftmp,body.encode('utf-8'))\n os.close(ftmp)\n return ftmpname\n\ndef copy_script(machine, script_name):\n subprocess.run(\"juju scp {script_name} {machine}:{script_name}\".format(machine = machine, script_name = script_name), shell = True)\n\ndef run_script(machine, script_name):\n subprocess.run(\"juju ssh {machine} \\\"chmod 700 {script_name}; sudo {script_name}; rm {script_name}\\\"\"\n .format(machine = machine, script_name = script_name), shell = True)\n\ndef collect_data(machine_id, hostname, work_dir):\n subprocess.run(\"juju scp {machine_id}:/tmp/lldp_output.json {work_dir}/{hostname}.json\"\n .format(machine_id = machine_id, hostname = hostname, work_dir = work_dir), shell = True)\n\ndef main(options):\n if os.path.isdir(options.work_dir):\n shutil.rmtree(options.work_dir)\n os.mkdir(options.work_dir)\n script_name = write_collector_script(options.install_lldp)\n for machine in get_model_machies():\n id = machine[0]\n hostname = machine[1]\n copy_script(id, script_name)\n run_script(id, script_name)\n collect_data(id, hostname, options.work_dir)\n os.remove(script_name)\n\nif __name__ == \"__main__\":\n usage = \"usage: %prog [options] arg1 arg2\"\n parser = OptionParser(usage=usage)\n parser.add_option(\"-d\", \"--dir\",\n action=\"store\", type=\"string\", dest=\"work_dir\", default=\"/tmp/lldp\", help=\"Output directory\")\n parser.add_option(\"-i\", \"--install\",\n action=\"store_true\", dest=\"install_lldp\", default=False, help=\"Install LLDP tools first\") \n (options, args) = parser.parse_args() \n main(options)","repo_name":"majduk/net-surveyor","sub_path":"collect-lldp-juju.py","file_name":"collect-lldp-juju.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"}
+{"seq_id":"1729943712","text":"'''\nGiven two arrays X and Y of positive integers, find number of pairs such that xy > yx (raised to power of) where x is an element from X and y is an element from Y.\n\nInput:\nThe first line of input contains an integer T, denoting the number of test cases. Then T test cases follow. Each test consists of three lines. The first line of each test case consists of two space separated M and N denoting size of arrays X and Y respectively. The second line of each test case contains M space separated integers denoting the elements of array X. The third line of each test case contains N space separated integers denoting elements of array Y.\n\nOutput:\nCorresponding to each test case, print in a new line, the number of pairs such that xy > yx.\n\nConstraints:\n1 ≤ T ≤ 100\n1 ≤ M, N ≤ 105\n1 ≤ X[i], Y[i] ≤ 103\n\nExample:\nInput\n1\n3 2\n2 1 6\n1 5\n\nOutput\n3\n'''\n#DCP11\ndef comopare_power(a,b,m,n):\n count = 0\n for i in range(len(a)):\n for j in range(len(b)):\n if pow(a[i],b[j])>pow(b[j],a[i]):\n count += 1\n return count\n\nif __name__ == \"__main__\":\n ans = 0\n ans_list = []\n test_cases = int(input())\n for i in range(test_cases):\n n , m = map(int,input().split())\n a = list(map(int,input().split()))\n b = list(map(int,input().split()))\n ans = comopare_power(a,b,m,n)\n ans_list.append(ans)\n for i in range(test_cases): \n print(ans_list[i], end = \" \")\n","repo_name":"Keshav-Asopa/Daily_Coding_Problem","sub_path":"DCP11.py","file_name":"DCP11.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"10937614650","text":"# Author: Andrew Nell 2017/09/27 \n# HW2-Assignment 2\n\n###############################################################################\n\n# Code developed to pull information for a specific bus line from the MTA Bus\n# API and output all current vehicles coordinates and next stops and status\n\n###############################################################################\n\n# Imports to run code\nfrom __future__ import print_function\nimport sys\nimport json\n\ntry: \n import urllib2 as urllib\nexcept ImportError:\n import urllib.request as urllib # F. Bianco Reference 1\n\n###############################################################################\n# Import data from API \n\n# Ensure correct number of arguments entered \nif not len(sys.argv) == 4:\n print (\"Invalid number of arguments. Run as: python get_bus_info_adn\"\n \"323.py .csv\")\n sys.exit() # F. Bianco Reference 2\n\n# Define arguments\nkey, bus_line, outputfile = sys.argv[1], sys.argv[2], sys.argv[3]\n\n# Pull data from API using Key and Bus Line\ntry:\n url = (\"http://bustime.mta.info/api/siri/vehicle-monitoring.json?key=\"\n \"%s&VehicleMonitoringDetailLevel=calls&LineRef=%s\"%(key, bus_line))\n response = urllib.urlopen(url)\n data = response.read().decode(\"utf-8\")\n data = json.loads(data)\nexcept urllib.HTTPError:\n print(\"Invalid key and url. Please try again. Run as: python \" \n \"get_bus_info_adn323.py .csv\")\n sys.exit()\n\n###############################################################################\n# Define variables for output including Total Vehicles, Latitudes, longitudes \n# next stops and status for each vehicle and then print variables out and \n# create .csv file and start printing output into .csv\n\n# Define Number of vehicles\ntry:\n NoOfVehicles = (len(data['Siri']['ServiceDelivery']\n ['VehicleMonitoringDelivery'][0]['VehicleActivity']))\nexcept KeyError:\n print(\"Bus line does not exist or input invalid. Run as: \" \n \"python get_bus_info_adn323.py \")\n sys.exit()\n\n# create.csv \nfout = open(sys.argv[3], \"w\")\nfout.write(\"Latitude,Longitude,Stop Name,Stop Status\\n\")\n\n# Define latitude, longitutde, Next stop and statu of each bus and print \n# out in .CSV\nfor i in range(NoOfVehicles):\n \n # Define latitutde and Longitude\n \n latitude = (data['Siri']['ServiceDelivery']['VehicleMonitoringDelivery']\n [0]['VehicleActivity'][i]['MonitoredVehicleJourney']\n ['VehicleLocation']['Latitude'])\n \n longitude = (data['Siri']['ServiceDelivery']['VehicleMonitoringDelivery']\n [0]['VehicleActivity'][i]['MonitoredVehicleJourney']\n ['VehicleLocation']['Longitude']) \n \n # Define Next stop and account for errors\n try:\n\n stopname = (data['Siri']['ServiceDelivery']\n ['VehicleMonitoringDelivery'][0]['VehicleActivity'][i]\n ['MonitoredVehicleJourney']['OnwardCalls']['OnwardCall'][0]\n ['StopPointName'])\n except KeyError:\n stopname = \"N/A\"\n \n # Define status and account for errors \n try:\n\n stopstatus = (data['Siri']['ServiceDelivery']\n ['VehicleMonitoringDelivery'][0]['VehicleActivity'][i]\n ['MonitoredVehicleJourney']['OnwardCalls']['OnwardCall']\n [0]['Extensions']['Distances']['PresentableDistance'])\n except KeyError:\n stopstatus = \"N/A\"\n \n # Print outputs in desired format into .csv\n fout.write(str(latitude) + \",\" + str(longitude) + \",\" + str(stopname) + \n \",\" + str(stopstatus) + \"\\n\")\n\n\n\n###############################################################################\n\n# References \n\n# 1\n# F. 
Bianco, APIreadingJson.py.ipynb, access at: \n# https://github.com/fedhere/PUI2017_fb55/blob/master/Lab2_fb55/APIreadingJso\n# n.py.ipynb on 2017/09/27\n\n# 2 \n# F. Bianco, aSimplePythonThatWritesToCSV.py, access at: \n# https://github.com/fedhere/PUI2017_fb55/blob/master/Lab2_fb55/aSimplePython\n# ThatWritesToCSV.py on 2017/09/27 ","repo_name":"andrewnell/PUI2017_adn323","sub_path":"HW2_adn323/get_bus_info_adn323.py","file_name":"get_bus_info_adn323.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"15594424211","text":"from PyQt5 import QtGui, QtWidgets\n\nfrom openlp.core.common.i18n import UiStrings, translate\nfrom openlp.core.lib.settingstab import SettingsTab\nfrom openlp.core.lib.ui import create_valign_selection_widgets\nfrom openlp.core.widgets.buttons import ColorButton\n\n\nclass AlertsTab(SettingsTab):\n \"\"\"\n AlertsTab is the alerts settings tab in the settings dialog.\n \"\"\"\n def setup_ui(self):\n self.setObjectName('AlertsTab')\n super(AlertsTab, self).setup_ui()\n self.font_group_box = QtWidgets.QGroupBox(self.left_column)\n self.font_group_box.setObjectName('font_group_box')\n self.font_layout = QtWidgets.QFormLayout(self.font_group_box)\n self.font_layout.setObjectName('font_layout')\n self.font_label = QtWidgets.QLabel(self.font_group_box)\n self.font_label.setObjectName('font_label')\n self.font_combo_box = QtWidgets.QFontComboBox(self.font_group_box)\n self.font_combo_box.setObjectName('font_combo_box')\n self.font_layout.addRow(self.font_label, self.font_combo_box)\n self.font_color_label = QtWidgets.QLabel(self.font_group_box)\n self.font_color_label.setObjectName('font_color_label')\n self.font_color_button = ColorButton(self.font_group_box)\n self.font_color_button.setObjectName('font_color_button')\n self.font_layout.addRow(self.font_color_label, self.font_color_button)\n self.font_size_label = QtWidgets.QLabel(self.font_group_box)\n self.font_size_label.setObjectName('font_size_label')\n self.font_size_spin_box = QtWidgets.QSpinBox(self.font_group_box)\n self.font_size_spin_box.setObjectName('font_size_spin_box')\n self.font_layout.addRow(self.font_size_label, self.font_size_spin_box)\n self.left_layout.addWidget(self.font_group_box)\n # Background Settings\n self.background_group_box = QtWidgets.QGroupBox(self.left_column)\n self.background_group_box.setObjectName('background_group_box')\n self.background_layout = QtWidgets.QFormLayout(self.background_group_box)\n self.background_layout.setObjectName('background_settings_layout')\n self.background_color_label = QtWidgets.QLabel(self.background_group_box)\n self.background_color_label.setObjectName('background_color_label')\n self.background_color_button = ColorButton(self.background_group_box)\n self.background_color_button.setObjectName('background_color_button')\n self.background_layout.addRow(self.background_color_label, self.background_color_button)\n self.left_layout.addWidget(self.background_group_box)\n # Scroll Settings\n self.scroll_group_box = QtWidgets.QGroupBox(self.left_column)\n self.scroll_group_box.setObjectName('scroll_group_box')\n self.scroll_group_layout = QtWidgets.QFormLayout(self.scroll_group_box)\n self.scroll_group_layout.setObjectName('scroll_group_layout')\n self.scroll_check_box = QtWidgets.QCheckBox(self.scroll_group_box)\n self.scroll_check_box.setObjectName('scroll_check_box')\n self.scroll_group_layout.addRow(self.scroll_check_box)\n self.repeat_label = QtWidgets.QLabel(self.scroll_group_box)\n self.repeat_label.setObjectName('repeat_label')\n self.repeat_spin_box = QtWidgets.QSpinBox(self.scroll_group_box)\n self.repeat_spin_box.setObjectName('repeat_spin_box')\n self.scroll_group_layout.addRow(self.repeat_label, self.repeat_spin_box)\n self.left_layout.addWidget(self.scroll_group_box)\n # Other Settings\n self.settings_group_box = QtWidgets.QGroupBox(self.left_column)\n self.settings_group_box.setObjectName('settings_group_box')\n self.settings_layout = QtWidgets.QFormLayout(self.settings_group_box)\n self.settings_layout.setObjectName('settings_layout')\n 
self.timeout_label = QtWidgets.QLabel(self.settings_group_box)\n self.timeout_label.setObjectName('timeout_label')\n self.timeout_spin_box = QtWidgets.QSpinBox(self.settings_group_box)\n self.timeout_spin_box.setMaximum(180)\n self.timeout_spin_box.setObjectName('timeout_spin_box')\n self.settings_layout.addRow(self.timeout_label, self.timeout_spin_box)\n self.vertical_label, self.vertical_combo_box = create_valign_selection_widgets(self.font_group_box)\n self.vertical_label.setObjectName('vertical_label')\n self.vertical_combo_box.setObjectName('vertical_combo_box')\n self.settings_layout.addRow(self.vertical_label, self.vertical_combo_box)\n self.left_layout.addWidget(self.settings_group_box)\n self.left_layout.addStretch()\n self.preview_group_box = QtWidgets.QGroupBox(self.right_column)\n self.preview_group_box.setObjectName('preview_group_box')\n self.preview_layout = QtWidgets.QVBoxLayout(self.preview_group_box)\n self.preview_layout.setObjectName('preview_layout')\n self.font_preview = QtWidgets.QLineEdit(self.preview_group_box)\n self.font_preview.setObjectName('font_preview')\n self.preview_layout.addWidget(self.font_preview)\n self.right_layout.addWidget(self.preview_group_box)\n self.right_layout.addStretch()\n # Signals and slots\n self.background_color_button.colorChanged.connect(self.on_background_color_changed)\n self.font_color_button.colorChanged.connect(self.on_font_color_changed)\n self.font_combo_box.activated.connect(self.on_font_combo_box_clicked)\n self.timeout_spin_box.valueChanged.connect(self.on_timeout_spin_box_changed)\n self.font_size_spin_box.valueChanged.connect(self.on_font_size_spin_box_changed)\n self.repeat_spin_box.valueChanged.connect(self.on_repeat_spin_box_changed)\n self.scroll_check_box.toggled.connect(self.scroll_check_box_toggled)\n\n def retranslate_ui(self):\n self.font_group_box.setTitle(translate('AlertsPlugin.AlertsTab', 'Font Settings'))\n self.font_label.setText(translate('AlertsPlugin.AlertsTab', 'Font name:'))\n self.font_color_label.setText(translate('AlertsPlugin.AlertsTab', 'Font color:'))\n self.background_color_label.setText(UiStrings().BackgroundColorColon)\n self.font_size_label.setText(translate('AlertsPlugin.AlertsTab', 'Font size:'))\n self.font_size_spin_box.setSuffix(' {unit}'.format(unit=UiStrings().FontSizePtUnit))\n self.background_group_box.setTitle(translate('AlertsPlugin.AlertsTab', 'Background Settings'))\n self.settings_group_box.setTitle(translate('AlertsPlugin.AlertsTab', 'Other Settings'))\n self.timeout_label.setText(translate('AlertsPlugin.AlertsTab', 'Alert timeout:'))\n self.timeout_spin_box.setSuffix(' {unit}'.format(unit=UiStrings().Seconds))\n self.repeat_label.setText(translate('AlertsPlugin.AlertsTab', 'Repeat (no. 
of times):'))\n self.scroll_check_box.setText(translate('AlertsPlugin.AlertsTab', 'Enable Scrolling'))\n self.preview_group_box.setTitle(UiStrings().Preview)\n self.font_preview.setText(UiStrings().OpenLP)\n\n def on_background_color_changed(self, color):\n \"\"\"\n The background color has been changed.\n \"\"\"\n self.background_color = color\n self.update_display()\n\n def on_font_combo_box_clicked(self):\n \"\"\"\n The Font Combo was changed.\n \"\"\"\n self.update_display()\n\n def on_font_color_changed(self, color):\n \"\"\"\n The Font Color button has clicked.\n \"\"\"\n self.font_color = color\n self.update_display()\n\n def on_timeout_spin_box_changed(self):\n \"\"\"\n The Time out spin box has changed.\n\n \"\"\"\n self.timeout = self.timeout_spin_box.value()\n self.changed = True\n\n def on_font_size_spin_box_changed(self):\n \"\"\"\n The font size spin box has changed.\n \"\"\"\n self.font_size = self.font_size_spin_box.value()\n self.update_display()\n\n def on_repeat_spin_box_changed(self):\n \"\"\"\n The repeat spin box has changed\n \"\"\"\n self.repeat = self.repeat_spin_box.value()\n self.changed = True\n\n def scroll_check_box_toggled(self):\n \"\"\"\n The scrolling checkbox has been toggled\n \"\"\"\n if self.scroll_check_box.isChecked():\n self.repeat_spin_box.setEnabled(True)\n else:\n self.repeat_spin_box.setEnabled(False)\n self.scroll = self.scroll_check_box.isChecked()\n self.changed = True\n\n def load(self):\n \"\"\"\n Load the settings into the UI.\n \"\"\"\n self.settings.beginGroup(self.settings_section)\n self.timeout = self.settings.value('timeout')\n self.font_color = self.settings.value('font color')\n self.font_size = self.settings.value('font size')\n self.background_color = self.settings.value('background color')\n self.font_face = self.settings.value('font face')\n self.location = self.settings.value('location')\n self.repeat = self.settings.value('repeat')\n self.scroll = self.settings.value('scroll')\n self.settings.endGroup()\n self.font_size_spin_box.setValue(self.font_size)\n self.timeout_spin_box.setValue(self.timeout)\n self.font_color_button.color = self.font_color\n self.background_color_button.color = self.background_color\n self.repeat_spin_box.setValue(self.repeat)\n self.repeat_spin_box.setEnabled(self.scroll)\n self.vertical_combo_box.setCurrentIndex(self.location)\n self.scroll_check_box.setChecked(self.scroll)\n font = QtGui.QFont()\n font.setFamily(self.font_face)\n self.font_combo_box.setCurrentFont(font)\n self.update_display()\n self.changed = False\n\n def save(self):\n \"\"\"\n Save the changes on exit of the Settings dialog.\n \"\"\"\n self.settings.beginGroup(self.settings_section)\n # Check value has changed as no event handles this field\n if self.settings.value('location') != self.vertical_combo_box.currentIndex():\n self.changed = True\n self.settings.setValue('background color', self.background_color)\n self.settings.setValue('font color', self.font_color)\n self.settings.setValue('font size', self.font_size)\n self.font_face = self.font_combo_box.currentFont().family()\n self.settings.setValue('font face', self.font_face)\n self.settings.setValue('timeout', self.timeout)\n self.location = self.vertical_combo_box.currentIndex()\n self.settings.setValue('location', self.location)\n self.settings.setValue('repeat', self.repeat)\n self.settings.setValue('scroll', self.scroll_check_box.isChecked())\n self.settings.endGroup()\n if self.changed:\n self.settings_form.register_post_process('update_display_css')\n self.changed = 
False\n\n def update_display(self):\n \"\"\"\n Update the preview display after changes have been made,\n \"\"\"\n font = QtGui.QFont()\n font.setFamily(self.font_combo_box.currentFont().family())\n font.setBold(True)\n font.setPointSize(self.font_size)\n self.font_preview.setFont(font)\n self.font_preview.setStyleSheet('background-color: {back}; color: {front}'.format(back=self.background_color,\n front=self.font_color))\n self.changed = True\n","repo_name":"ipic/projecao","sub_path":"openlp/plugins/alerts/lib/alertstab.py","file_name":"alertstab.py","file_ext":"py","file_size_in_byte":11457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"28264453301","text":"import sys\nimport requests\n\n\ndef _help_():\n meta_data = {\n 'all': 'Provides all the details of your instance',\n 'accountId': 'Provides Account Id of your instance',\n 'architecture': 'Provides architecture of your instance',\n 'availabilityZone': 'Provides Availability Zone of your instance',\n 'billingProducts': \"Provides Billing Products of your instance\",\n 'devpayProductCodes': \"Provides payable products of your instance\",\n 'marketplaceProductCodes': \"Provides Marketplace details of your instance\",\n 'imageId': \"Provides AMI number/Id of your instance\",\n 'instanceId': 'Provides instance Id of your instance',\n 'instanceType': 'Provides instance type', 'kernelId': \"Provides kernelId of your instance\",\n 'pendingTime': 'Provides pending time of your instance',\n 'privateIp': 'Provides Private IP of your instance',\n 'ramdiskId': \"Provides RAM disk Id of your instance\", 'region': 'Provides region of your instance',\n 'version': 'Provides version of your instance'\n }\n print(\"Use any option provided below as a flag to run the script\")\n for key, value in meta_data.items():\n print(\"Option: {} ({})\".format(key, value))\n\n\ndef get_meta_data(key=\"all\"):\n url = \"http://169.254.169.254/latest/dynamic/instance-identity/document\"\n response = requests.get(url)\n if response.status_code == 200:\n response = response.json()\n if key == \"all\":\n return response\n else:\n if key in response:\n return response[key]\n else:\n return \"Please check the key provided \\nUse help for more options\\n \" \\\n \"if no key is provided all is considered by default\"\n else:\n return \"Please try again after sometime\"\n\n\nflag = sys.argv[-1]\n\n\nif flag == \"help\":\n _help_()\nelse:\n if flag != \"challenge2.py\":\n print(get_meta_data(flag))\n else:\n print(get_meta_data())\n","repo_name":"AkshayArni003/Interview","sub_path":"coding/challenge2.py","file_name":"challenge2.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74378122793","text":"#用requests库下载图片\nimport unittest\nimport requests\nimport io\nimport matplotlib.pyplot as plt\n\n\nclass SimpleImageDownloaderTest(unittest.TestCase):\n def test_download_image(self):\n resp = requests.get('https://www.baidu.com/img/PCtm_d9c8750bed0b3c7d089fa7d55720d6cf.png') # 目标图片的url\n with open('D:/python/venv/onlinelearning/a.jpg', 'wb')as f: # 使用with结构打开本地文件,如果省略路径则在当前目录中\n f.write(resp.content) # 将二进制数据写入到文件中\n\n # 用PIL库以流的方式读取此图片的内容\n from PIL import Image\n img = Image.open('D:/python/venv/onlinelearning/a.jpg')\n print(img)\n\n # 用matplotlib中的matplotlib.pyplot.imshow函数显示该图片\n plt.subplot(221);\n plt.imshow(img)\n plt.show()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"yzxty/learngit","sub_path":"python/py6_1.py","file_name":"py6_1.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"38428400406","text":"import unittest\n\nfrom faker import Faker\n\nfrom .model_test import ModelTest\nfrom random import randrange\n\nclass TestLottery(ModelTest):\n\n\n faker = Faker()\n\n @classmethod\n def setUpClass(cls):\n super(TestLottery, cls).setUpClass()\n\n from mib.models import lottery\n cls.lottery = lottery\n\n @staticmethod\n def assertLotteryEquals(value, expected):\n t = unittest.FunctionTestCase(TestLottery)\n t.assertEqual(value.id, expected.id)\n t.assertEqual(value.ticket_number, expected.ticket_number)\n t.assertEqual(value.points, expected.points)\n \n @staticmethod\n def generate_random_lottery_row():\n id = randrange(100000)\n ticket_number = randrange(100)\n points = 0\n\n from mib.models import Lottery\n\n lottery = Lottery(\n id = id,\n ticket_number = ticket_number,\n points = points\n )\n\n return lottery\n\n def test_set_ticket_number(self):\n row = TestLottery.generate_random_lottery_row()\n row.set_ticket_number(15)\n\n self.assertEqual(\n row.ticket_number,\n 15\n )\n def test_unset_ticket_number(self):\n row = TestLottery.generate_random_lottery_row()\n row.unset_ticket_number()\n self.assertEqual(\n row.ticket_number,\n -1\n )\n def test_add_points(self):\n row = TestLottery.generate_random_lottery_row()\n points = row.points\n row.add_points(5)\n self.assertEqual(row.points, points+5)\n\n def test_set_points(self):\n row = TestLottery.generate_random_lottery_row()\n row.set_points(5)\n self.assertEqual(row.points,5)\n","repo_name":"lcnz/mib-lottery","sub_path":"tests/models/test_lottery.py","file_name":"test_lottery.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70444247592","text":"from typing import Optional\nimport json\nimport asyncio\n\nimport websockets\nimport agorartc\nfrom fastapi import FastAPI, Query, Form, Request, status, BackgroundTasks\nfrom fastapi.responses import RedirectResponse, HTMLResponse\nfrom fastapi.templating import Jinja2Templates\nfrom pydantic import BaseModel\nfrom gtts import gTTS\nfrom clubhouse.clubhouse import Clubhouse\n\nfrom donatehouse import da\nfrom donatehouse import settings\nfrom donatehouse import utils\n\n\napp = FastAPI()\ntemplates = Jinja2Templates(directory='donatehouse/templates')\n\n\nclass ClubhouseConfig(BaseModel):\n user_id: Optional[str]\n user_token: Optional[str]\n user_device: Optional[str]\n channel_id: Optional[str]\n language: Optional[str]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n clubhouse_config = utils.read_ch_config()\n self.user_id = clubhouse_config.get('user_id')\n self.user_token = clubhouse_config.get('user_token')\n self.user_device = clubhouse_config.get('user_device')\n self.channel_id = clubhouse_config.get('channel_id')\n self.language = clubhouse_config.get('language')\n\n\nclass DaConfig(BaseModel):\n client_id: Optional[int]\n client_secret: Optional[str]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n config = utils.read_da_config()\n self.client_id = config.get('client_id')\n self.client_secret = config.get('client_secret')\n\n\nch_config = ClubhouseConfig()\nclient = Clubhouse(user_id=ch_config.user_id,\n user_token=ch_config.user_token,\n user_device=ch_config.user_device)\n\nda_config = DaConfig()\nda = da.DonationAlertsApi(da_config.client_id,\n da_config.client_secret,\n settings.REDIRECT_URI,\n settings.SCOPE)\n\n\n@app.get('/index')\nasync def index():\n if not (ch_config.user_id\n and ch_config.user_token\n and ch_config.user_device\n and ch_config.channel_id\n and ch_config.language):\n return RedirectResponse('/enter_phone')\n\n if not (da_config.client_id and da_config.client_secret):\n return RedirectResponse('/da_config')\n\n return RedirectResponse(da.authorize())\n\n\n@app.get('/')\nasync def default_handler():\n return ''\n\n\n@app.get('/code')\nasync def da_code_handler(background_tasks: BackgroundTasks,\n code: str = Query(...),):\n da.get_access_token(code)\n da.get_user_info()\n background_tasks.add_task(connect)\n return RedirectResponse('/', status_code=status.HTTP_303_SEE_OTHER)\n\n\n@app.get('/enter_phone', response_class=HTMLResponse)\nasync def enter_phone_page(request: Request):\n return templates.TemplateResponse('enter_phone.html', {\"request\": request})\n\n\n@app.post('/clubhouse_auth', response_class=HTMLResponse)\nasync def clubhouse_config_page(request: Request,\n phone_number: str = Form(...)):\n client.start_phone_number_auth(phone_number)\n return templates.TemplateResponse('clubhouse_config.html',\n {\"request\": request,\n 'phone_number': phone_number})\n\n\n@app.post('/clubhouse_config')\nasync def clubhouse_config_handler(phone_number: str = Form(...),\n code: str = Form(...),\n channel: str = Form(...),\n lang: str = Form(...)):\n data = client.complete_phone_number_auth(phone_number, code)\n if 'user_profile' in data:\n ch_config.user_id = str(data['user_profile']['user_id'])\n ch_config.user_token = data['auth_token']\n ch_config.user_device = client.HEADERS.get(\"CH-DeviceId\")\n ch_config.channel_id = channel\n ch_config.language = lang\n\n utils.write_ch_config(str(data['user_profile']['user_id']),\n data['auth_token'],\n 
client.HEADERS.get(\"CH-DeviceId\"),\n channel,\n lang)\n\n return RedirectResponse('/index', status_code=status.HTTP_303_SEE_OTHER)\n\n\n@app.get('/da_config', response_class=HTMLResponse)\nasync def da_config_page(request: Request):\n return templates.TemplateResponse('da_config.html',\n {\"request\": request})\n\n\n@app.post('/da_config')\nasync def da_config_handler(client_id: int = Form(...),\n client_secret: str = Form(...)):\n da_config.client_id = client_id\n da_config.client_secret = client_secret\n da.client_id = client_id\n da.client_secret = client_secret\n utils.write_da_config(client_id, client_secret)\n return RedirectResponse('/index', status_code=status.HTTP_303_SEE_OTHER)\n\n\nasync def clubhouse_ping():\n while True:\n print('PING')\n client.active_ping(ch_config.channel_id)\n await asyncio.sleep(300)\n\n\nasync def connect():\n ch_config = ClubhouseConfig()\n client = Clubhouse(user_id=ch_config.user_id,\n user_token=ch_config.user_token,\n user_device=ch_config.user_device)\n async with websockets.connect(settings.CENTRIFUGO_WS) as ws:\n print('DA CONNECTED')\n await ws.send(json.dumps(da.ws_authorize()))\n data = await ws.recv()\n data = json.loads(data)\n da.set_centrifugo_client_id(data['result']['client'])\n da.subscribe()\n await ws.send(json.dumps(da.ws_connect()))\n await ws.recv()\n await ws.recv()\n\n RTC = agorartc.createRtcEngineBridge()\n event_handler = agorartc.RtcEngineEventHandlerBase()\n RTC.initEventHandler(event_handler)\n # 0xFFFFFFFE will exclude Chinese servers from Agora's servers.\n RTC.initialize(Clubhouse.AGORA_KEY,\n None,\n agorartc.AREA_CODE_GLOB & 0xFFFFFFFE)\n # Enhance voice quality\n RTC.setAudioProfile(agorartc.AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO,\n agorartc.AUDIO_SCENARIO_GAME_STREAMING)\n\n channel_info = client.join_channel(ch_config.channel_id)\n\n asyncio.create_task(clubhouse_ping())\n\n channel_token = channel_info['token']\n users = channel_info['users']\n\n speaker_permission = False\n while not speaker_permission:\n for user in users:\n if bool(user['is_speaker']):\n data = client.accept_speaker_invite(ch_config.channel_id,\n user['user_id'])\n if data['success']:\n speaker_permission = True\n break\n print('Please, invite')\n await asyncio.sleep(10)\n\n while True:\n data = await ws.recv()\n data = json.loads(data)\n print(data)\n data = data['result']['data']['data']\n username = data['username']\n message = data['message']\n text_to_speech = f'Message from {username}. {message}'\n tts_obj = gTTS(text=text_to_speech,\n lang=ch_config.language,\n slow=False)\n tts_obj.save('donation.mp3')\n\n RTC.joinChannel(channel_token,\n ch_config.channel_id,\n \"\",\n int(ch_config.user_id))\n await asyncio.sleep(0.1)\n RTC.startAudioMixing('donation.mp3', False, True, 1)\n donation_duration = RTC.getAudioMixingDuration()\n await asyncio.sleep(donation_duration / 1000 + 0.5)\n RTC.leaveChannel()\n","repo_name":"kirillkuzin/donatehouse","sub_path":"donatehouse/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7770,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"4588856858","text":"# from os import path\nfrom io import open\nfrom setuptools import setup, find_packages\n\n\ndef read(f):\n return open(f, \"r\", encoding='utf-8').read()\n\n\nsetup(\n name=\"amocrm-api-wrapper\",\n version='0.0.17',\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=[\n \"requests\",\n ],\n description=\"Amocrm api wrapper v4\",\n author=\"bzdvdn\",\n author_email=\"bzdv.dn@gmail.com\",\n url=\"https://github.com/bzdvdn/amocrm-api-wrapper\",\n license=\"MIT\",\n python_requires=\">=3.6\",\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n)\n","repo_name":"bzdvdn/amocrm-api-wrapper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"1783087237","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2022/11/25 15:22\n\n@author: Yang Fan\n\n模型的组成,包括LightGBM的训练, 多棵树分组, Embedding模型, GBDT2NN模型\n\"\"\"\nimport math\n\nimport lightgbm as lgb\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport wandb\nfrom utils import ModelInterpreter\nfrom wandb.lightgbm import wandb_callback\n\n\n# LightGBM的训练\ndef TrainGBDT(train_x, train_y, test_x, test_y):\n params = {\n \"task\": \"train\",\n \"boosting_type\": \"gbdt\",\n \"num_class\": 1,\n \"objective\": \"regression\",\n \"metric\": \"mse\",\n \"boost_from_average\": True,\n \"num_leaves\": wandb.config.num_leaves,\n \"feature_fraction\": wandb.config.feature_fraction,\n \"bagging_freq\": wandb.config.bagging_freq,\n \"bagging_fraction\": wandb.config.bagging_fraction,\n \"num_threads\": wandb.config.num_threads,\n \"learning_rate\": wandb.config.tree_lr,\n \"seed\": wandb.config.seed,\n }\n lgb_train = lgb.Dataset(train_x, train_y.reshape(-1), params=params)\n lgb_eval = lgb.Dataset(test_x, test_y.reshape(-1), reference=lgb_train)\n # early_stop_callback = lgb.early_stopping(\n # stopping_rounds=wandb.config.early_stopping_rounds\n # )\n gbm = lgb.train(\n params,\n lgb_train,\n num_boost_round=wandb.config.num_trees,\n valid_sets=[lgb_eval],\n callbacks=[wandb_callback()],\n )\n preds = gbm.predict(test_x, raw_score=True)\n preds = preds.astype(np.float32)\n return gbm, preds\n\n\n# 将n棵树分组\ndef SubGBDTLeaf_cls(train_x, test_x, gbm):\n num_slices = wandb.config.num_slices\n MAX = train_x.shape[1]\n\n # get leaf prediction index\n leaf_preds = gbm.predict(train_x, pred_leaf=True).reshape(\n train_x.shape[0], -1\n )\n test_leaf_preds = gbm.predict(test_x, pred_leaf=True).reshape(\n test_x.shape[0], -1\n )\n n_trees = leaf_preds.shape[1]\n\n # get leaf output from each tree\n leaf_output = np.zeros(\n [n_trees, wandb.config.num_leaves], dtype=np.float32\n )\n for tree_id in range(n_trees):\n num_leaf = np.max(leaf_preds[:, tree_id]) + 1\n for leaf_id in range(num_leaf):\n leaf_output[tree_id][leaf_id] = gbm.get_leaf_output(\n tree_id, leaf_id\n )\n\n modelI = ModelInterpreter(gbm)\n clusterIdx = modelI.EqualGroup(num_slices)\n n_feature = wandb.config.feature_per_group\n treeI = modelI.trees\n\n for n_idx in range(num_slices):\n tree_indices = np.where(clusterIdx == n_idx)[0]\n trees = {}\n tid = 0\n for jdx in tree_indices:\n trees[str(tid)] = treeI[jdx].raw\n tid += 1\n\n all_hav = {}\n for jdx, tree in enumerate(tree_indices):\n for kdx, f in enumerate(treeI[tree].feature):\n if f == -2:\n continue\n if f not in all_hav:\n all_hav[f] = 0\n all_hav[f] += treeI[tree].gain[kdx]\n\n all_hav = sorted(all_hav.items(), key=lambda kv: -kv[1])\n used_features = [item[0] for item in all_hav[:n_feature]]\n\n for kdx in range(max(0, n_feature - len(used_features))):\n used_features.append(MAX)\n cur_leaf_preds = leaf_preds[:, tree_indices]\n cur_test_leaf_preds = test_leaf_preds[:, tree_indices]\n new_train_y = np.zeros(train_x.shape[0])\n for jdx in tree_indices:\n new_train_y += np.take(\n leaf_output[jdx, :].reshape(-1), leaf_preds[:, jdx].reshape(-1)\n )\n new_train_y = new_train_y.reshape(-1, 1).astype(np.float32)\n yield used_features, new_train_y, cur_leaf_preds, cur_test_leaf_preds, np.mean(\n np.take(leaf_output, tree_indices, 0)\n ), np.mean(\n leaf_output\n )\n\n\nclass BatchDense(nn.Module):\n def __init__(self, batch, in_features, out_features, bias_init=None):\n super(BatchDense, self).__init__()\n self.batch = batch\n self.in_features = in_features\n 
self.out_features = out_features\n self.weight = nn.Parameter(\n torch.Tensor(batch, in_features, out_features), requires_grad=True\n )\n self.bias = nn.Parameter(\n torch.Tensor(batch, 1, out_features), requires_grad=True\n )\n self.reset_parameters(bias_init)\n\n def reset_parameters(self, bias_init=None):\n stdv = math.sqrt(6.0 / (self.in_features + self.out_features))\n self.weight.data.uniform_(-stdv, stdv)\n if bias_init is not None:\n self.bias.data = torch.from_numpy(bias_init)\n else:\n self.bias.data.fill_(0)\n\n def forward(self, x):\n size = x.size()\n # Todo: avoid the swap axis\n x = x.view(x.size(0), self.batch, -1)\n out = x.transpose(0, 1).contiguous()\n out = torch.baddbmm(self.bias, out, self.weight)\n out = out.transpose(0, 1).contiguous()\n out = out.view(x.size(0), -1)\n return out\n\n\nclass EmbeddingModel(nn.Module):\n def __init__(\n self,\n n_models,\n max_ntree_per_split,\n embsize,\n maxleaf,\n n_output,\n out_bias=None,\n task=\"regression\",\n ):\n super(EmbeddingModel, self).__init__()\n self.task = task\n self.n_models = n_models\n self.maxleaf = maxleaf\n self.fcs = nn.ModuleList()\n self.max_ntree_per_split = max_ntree_per_split\n\n self.embed_w = nn.Parameter(\n torch.Tensor(n_models, max_ntree_per_split * maxleaf, embsize),\n requires_grad=True,\n )\n # torch.nn.init.xavier_normal(self.embed_w)\n stdv = math.sqrt(1.0 / (max_ntree_per_split))\n self.embed_w.data.normal_(0, stdv) # .uniform_(-stdv, stdv)\n\n self.bout = BatchDense(n_models, embsize, 1, out_bias)\n self.bn = nn.BatchNorm1d(embsize * n_models)\n self.tanh = nn.Tanh()\n self.sigmoid = nn.Sigmoid()\n # self.output_fc = Dense(n_models * embsize, n_output)\n self.dropout = torch.nn.Dropout()\n if task == \"regression\":\n self.criterion = nn.MSELoss()\n else:\n self.criterion = nn.BCELoss()\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n )\n\n def one_hot(self, y, numslot, mask=None):\n y_tensor = (\n y.type(torch.LongTensor).contiguous().view(-1, 1).to(self.device)\n )\n tmp = torch.zeros(\n y_tensor.size()[0],\n numslot,\n device=self.device,\n dtype=torch.float32,\n requires_grad=False,\n ).to(self.device)\n y_one_hot = tmp.scatter_(1, y_tensor.to(self.device), 1)\n if mask is not None:\n y_one_hot = y_one_hot * mask\n y_one_hot = y_one_hot.view(y.shape[0], -1)\n return y_one_hot\n\n def batchmul(self, x, models, embed_w, length):\n out = self.one_hot(x, length)\n out = out.view(x.size(0), models, -1)\n out = out.transpose(0, 1).contiguous()\n out = torch.bmm(out, embed_w)\n out = out.transpose(0, 1).contiguous()\n out = out.view(x.size(0), -1)\n return out\n\n def lastlayer(self, x):\n out = self.batchmul(x, self.n_models, self.embed_w, self.maxleaf)\n out = self.bn(out)\n return out\n\n def forward(self, x):\n out = self.lastlayer(x)\n out = self.dropout(out)\n out = out.view(x.size(0), self.n_models, -1)\n out = self.bout(out)\n # out = self.output_fc(out)\n sum_out = torch.sum(out, -1, True)\n if self.task != \"regression\":\n return self.sigmoid(sum_out), out\n return sum_out, out\n\n def joint_loss(self, out, target, out_inner, target_inner, *args):\n return nn.MSELoss()(out_inner, target_inner)\n\n def true_loss(self, out, target):\n return self.criterion(out, target)\n\n\nclass GBDT2NN(nn.Module):\n def __init__(\n self,\n input_size,\n used_features,\n tree_layers,\n output_w,\n output_b,\n device=None,\n ):\n super(GBDT2NN, self).__init__()\n print(\"Init GBDT2NN\")\n self.n_models = len(used_features)\n self.tree_layers = tree_layers\n n_feature 
= len(used_features[0])\n used_features = np.asarray(used_features).reshape(-1)\n self.used_features = nn.Parameter(\n torch.from_numpy(used_features).to(device), requires_grad=False\n )\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n assert len(tree_layers) > 0\n self.bdenses = nn.ModuleList()\n self.bns = nn.ModuleList()\n self.bdenses.append(\n BatchDense(self.n_models, n_feature, tree_layers[0])\n )\n for i in range(1, len(tree_layers)):\n self.bdenses.append(\n BatchDense(self.n_models, tree_layers[i - 1], tree_layers[i])\n )\n for i in range(len(tree_layers) - 1):\n self.bns.append(nn.BatchNorm1d(tree_layers[i] * self.n_models))\n self.out_weight = nn.Parameter(\n torch.from_numpy(output_w).to(device), requires_grad=False\n )\n self.out_bias = nn.Parameter(\n torch.from_numpy(output_b).to(device), requires_grad=False\n )\n print(\"Init GBDT2NN succeed!\")\n self.criterion = nn.MSELoss()\n self.device = device\n\n def batchmul(self, x, f):\n out = x.view(x.size(0), self.n_models, -1)\n out = f(out)\n out = out.view(x.size(0), -1)\n return out\n\n def lastlayer(self, x):\n out = torch.index_select(\n x.to(self.device), dim=1, index=self.used_features.to(self.device)\n )\n for i in range(len(self.bdenses) - 1):\n out = self.batchmul(out, self.bdenses[i])\n out = self.bns[i](out)\n out = self.relu(out)\n return out\n\n def forward(self, x):\n out = self.lastlayer(x.float())\n pred = self.batchmul(out, self.bdenses[-1])\n out = torch.addmm(self.out_bias, pred, self.out_weight)\n return out, pred\n\n def emb_loss(self, emb_pred, emb_target):\n loss_weight = torch.abs(torch.sum(self.out_weight, 1))\n l2_loss = (\n nn.MSELoss(reduction=\"none\")(emb_pred, emb_target) * loss_weight\n )\n return torch.mean(torch.sum(l2_loss, dim=1))\n\n def joint_loss(self, out, target, emb_pred, emb_target, ratio):\n return (1 - ratio) * self.criterion(\n out, target\n ) + ratio * self.emb_loss(emb_pred, emb_target)\n\n def true_loss(self, out, target):\n return self.criterion(out.to(self.device), target.to(self.device))\n","repo_name":"ELKYang/quant_research","sub_path":"2.DeepGBM/model_components.py","file_name":"model_components.py","file_ext":"py","file_size_in_byte":10689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
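The EmbeddingModel above distills each tree group by one-hot encoding every sample's GBDT leaf index and projecting it through a per-group embedding matrix (its batchmul step); a self-contained sketch of that step with toy sizes (all names here are illustrative, not from the repo):

    import torch

    n_models, max_ntree, maxleaf, embsize, batch = 2, 3, 4, 8, 5  # toy dimensions
    embed_w = torch.randn(n_models, max_ntree * maxleaf, embsize)

    # leaf indices predicted by the GBDT, flattened to (batch, n_models * max_ntree)
    leaf_idx = torch.randint(0, maxleaf, (batch, n_models * max_ntree))

    one_hot = torch.nn.functional.one_hot(leaf_idx, maxleaf).float()  # (batch, 6, maxleaf)
    one_hot = one_hot.view(batch, n_models, -1).transpose(0, 1)       # (n_models, batch, max_ntree*maxleaf)
    emb = torch.bmm(one_hot, embed_w).transpose(0, 1)                 # (batch, n_models, embsize)
    print(emb.shape)  # torch.Size([5, 2, 8])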
+{"seq_id":"6552344261","text":"# Django settings\n\ntry:\n from local_settings import *\nexcept ImportError:\n import sys\n sys.stderr.write(\"Error importing local settings. Did you remember to make a local_settings.py?\\n\");\n sys.exit(1)\n\n\nADMINS = (\n ('Carl Jackson', 'ctj@mit.edu'),\n)\n\nMANAGERS = ADMINS\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a\n# trailing slash.\n# Examples: \"http://foo.com/media/\", \"/media/\".\n# ADMIN_MEDIA_PREFIX = '/admin/media/'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.load_template_source',\n 'django.template.loaders.app_directories.load_template_source',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.csrf.middleware.CsrfMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nTEMPLATE_DIRS = (\n SITE_ROOT + 'templates/',\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'registration',\n 'competition',\n 'abacus',\n)\n\n# Email stuff\nDEFAULT_FROM_EMAIL = 'Harvard-MIT Mathematics Tournament '\nEMAIL_PREFIX = '[HMMT] '\n\n# Login stuff\nLOGIN_URL = '/february/accounts/login/'\nLOGIN_REDIRECT_URL = '/february/registration/teams/'\n","repo_name":"zenazn/coatl","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}
+{"seq_id":"38932638129","text":"\nimport subprocess\n\n__download_tools = {}\n\ndefault_config = {\n \"aria2\": {\n \"extra_opts\": (\"-s10\", \"-x10\")\n }\n}\n\n\ndef get_download_tool(name):\n return __download_tools.get(name, None)\n\ndef download_tool(name):\n def register(cls):\n config = default_config.get(name, None)\n assert isinstance(config, dict)\n if config:\n __download_tools[name] = cls(**config)\n else:\n __download_tools[name] = cls()\n return cls\n return register\n\n\nclass DownloadTool(object):\n \"\"\"\n Interface definition for all download tools\n \"\"\"\n def download(self, uri='', resume=True, dir_path=\"\", file_name=\"\", headers=\"\"):\n pass\n\n@download_tool(\"aria2\")\nclass Aria2(DownloadTool):\n def __init__(self, default_header=\"\", extra_opts=None):\n self.__default_headers = default_header\n self.__extra_opts = None\n if isinstance(extra_opts, str):\n self.__extra_opts = extra_opts.split()\n elif type(extra_opts) in (list, tuple):\n self.__extra_opts = extra_opts\n else:\n self.__extra_opts = str(extra_opts)\n\n def download(self, uri='', resume=True, dir_path=\"\", file_name=\"\", headers=\"\"):\n aria2_opts = ['aria2c', '--header=' + headers, uri, '--dir', dir_path, '--out', file_name, '--file-allocation=none']\n if resume:\n aria2_opts.append('-c')\n if self.__extra_opts:\n aria2_opts.extend(self.__extra_opts)\n exit_code = subprocess.call(aria2_opts)\n if exit_code != 0:\n raise Exception('aria2c exited abnormally')\n","repo_name":"pandazxx/music163","sub_path":"downloadtool.py","file_name":"downloadtool.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14654024386","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport streamlit as st\r\nfrom PIL import Image\r\nimport random\r\n\r\n# list of all 101 class names\r\nclass_names = ['apple_pie',\r\n 'baby_back_ribs',\r\n 'baklava',\r\n 'beef_carpaccio',\r\n 'beef_tartare',\r\n 'beet_salad',\r\n 'beignets',\r\n 'bibimbap',\r\n 'bread_pudding',\r\n 'breakfast_burrito',\r\n 'bruschetta',\r\n 'caesar_salad',\r\n 'cannoli',\r\n 'caprese_salad',\r\n 'carrot_cake',\r\n 'ceviche',\r\n 'cheesecake',\r\n 'cheese_plate',\r\n 'chicken_curry',\r\n 'chicken_quesadilla',\r\n 'chicken_wings',\r\n 'chocolate_cake',\r\n 'chocolate_mousse',\r\n 'churros',\r\n 'clam_chowder',\r\n 'club_sandwich',\r\n 'crab_cakes',\r\n 'creme_brulee',\r\n 'croque_madame',\r\n 'cup_cakes',\r\n 'deviled_eggs',\r\n 'donuts',\r\n 'dumplings',\r\n 'edamame',\r\n 'eggs_benedict',\r\n 'escargots',\r\n 'falafel',\r\n 'filet_mignon',\r\n 'fish_and_chips',\r\n 'foie_gras',\r\n 'french_fries',\r\n 'french_onion_soup',\r\n 'french_toast',\r\n 'fried_calamari',\r\n 'fried_rice',\r\n 'frozen_yogurt',\r\n 'garlic_bread',\r\n 'gnocchi',\r\n 'greek_salad',\r\n 'grilled_cheese_sandwich',\r\n 'grilled_salmon',\r\n 'guacamole',\r\n 'gyoza',\r\n 'hamburger',\r\n 'hot_and_sour_soup',\r\n 'hot_dog',\r\n 'huevos_rancheros',\r\n 'hummus',\r\n 'ice_cream',\r\n 'lasagna',\r\n 'lobster_bisque',\r\n 'lobster_roll_sandwich',\r\n 'macaroni_and_cheese',\r\n 'macarons',\r\n 'miso_soup',\r\n 'mussels',\r\n 'nachos',\r\n 'omelette',\r\n 'onion_rings',\r\n 'oysters',\r\n 'pad_thai',\r\n 'paella',\r\n 'pancakes',\r\n 'panna_cotta',\r\n 'peking_duck',\r\n 'pho',\r\n 'pizza',\r\n 'pork_chop',\r\n 'poutine',\r\n 'prime_rib',\r\n 'pulled_pork_sandwich',\r\n 'ramen',\r\n 'ravioli',\r\n 'red_velvet_cake',\r\n 'risotto',\r\n 'samosa',\r\n 'sashimi',\r\n 'scallops',\r\n 'seaweed_salad',\r\n 'shrimp_and_grits',\r\n 'spaghetti_bolognese',\r\n 'spaghetti_carbonara',\r\n 'spring_rolls',\r\n 'steak',\r\n 'strawberry_shortcake',\r\n 'sushi',\r\n 'tacos',\r\n 'takoyaki',\r\n 'tiramisu',\r\n 'tuna_tartare',\r\n 'waffles']\r\n\r\n\r\n# loading the modle\r\n@st.cache(allow_output_mutation=True) # setting up cache for the model\r\ndef load_model():\r\n model = tf.keras.models.load_model('effi_080_second.h5')\r\n return model\r\n\r\n# call the model to predict the class of the image\r\nmodel = load_model()\r\n\r\n# showing a Header\r\n# st.title('Food 101 Classifier™')\r\nst.markdown(\"Food 101 Classifier™
\", unsafe_allow_html=True)\r\nst.write('A image classifier based on the Food 101 dataset')\r\ncol1, col2 = st.beta_columns(2)\r\n\r\n\r\n# Asking for file\r\nfile = col2.file_uploader(\"Upload an image of food\", type=[\"png\", \"jpg\"])\r\n#food images list\r\nsam_lst = ['None', 'Icecream', 'Pizza', 'Waffels', 'Steak']\r\n# random greet !!!\r\ng_lst = ['you ordered >> ', 'so you like >> ', 'want to have some >> ', \"your today's lunch >> \", \"your favorite food is >> \", \"serving >> \"]\r\n# getting random greeting\r\ngreet = random.choice(g_lst)\r\n\r\n\r\n# function for predicting food class with a custom image\r\ndef predict_class(file, greet):\r\n \"\"\"Functon that will prepare the images and will predict the class\"\"\"\r\n img = Image.open(file)\r\n img2 = img.copy()\r\n img2.resize((300, 300))\r\n col1.image(img2,caption=f\"Looks Delicious!! \", use_column_width=True)\r\n # converting the image to a numpy array\r\n img_array = np.array(img)\r\n # reshaping the image to a 4d tensor usable by the model\r\n img = tf.image.resize(img_array, size=(224,224))\r\n img = tf.expand_dims(img, axis=0)\r\n pred = model.predict(img)\r\n pred_cls = class_names[pred.argmax()]\r\n col2.success(greet + pred_cls) # showing the prediction class name\r\n\r\n\r\n# prdeicting the class of the image from the file / custome image and samples\r\nif file is not None:\r\n with st.spinner('Hold on your food is getting cooked...'):\r\n predict_class(file, greet)\r\nelse:\r\n col1.warning(\"No image uploaded. You can use sample imgaes from below list\")\r\n file2 = col1.selectbox('Select from sample images', options=sam_lst)\r\n if file2 == 'Icecream':\r\n file = 'icecream.jpg'\r\n with st.spinner('Hold on your food is getting cooked...'):\r\n predict_class(file, greet)\r\n elif file2 == 'Pizza':\r\n file = 'pizza.jpg'\r\n with st.spinner('Hold on your food is getting cooked...'):\r\n predict_class(file, greet)\r\n elif file2 == 'Waffels': \r\n file = 'waffels.jpg'\r\n with st.spinner('Hold on your food is getting cooked...'):\r\n predict_class(file, greet) \r\n elif file2 == 'Steak':\r\n file = 'steak.jpg'\r\n with st.spinner('Hold on your food is getting cooked...'): \r\n predict_class(file, greet) \r\n else:\r\n pass\r\n \r\nnote = \"\"\" \r\n\\n\r\nThis project based on the [Food101](https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/) Paper which used Convolutional Neuranetwork trained for 2 to 3 days to achieve 77.4% top-1 accuracy.\r\nThe project is made by download the food101 dataset from the [TensorFlow dataset](https://www.tensorflow.org/datasets/catalog/food101)(size: 4.6GB) which consists of 750 images x 101 training classes = 75750 training images.\r\nI used the [EfficientNetB0](https://www.tensorflow.org/api_docs/python/tf/keras/applications/EfficientNetB0) model with fine-tune unfreeze all layers of the model. \\n\r\nAlthough this WebApp model accuracy is around 80% to 82%. I am also sharing the [notebook](https://colab.research.google.com/drive/15sJJhrZBo12CA3flnrX-NC4WwrP84z0D?usp=sharing) for this project.\r\n[Github](https://github.com/subha996/food-101_updated)\r\n\"\"\"\r\nst.write(note)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nwith st.beta_expander('Food Names(Classes), The model will work better if you chose food from this list'):\r\n st.write(class_names)\r\n","repo_name":"subha996/food-101_updated","sub_path":"food101.py","file_name":"food101.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"17250694840","text":"#!/usr/bin/env python\n\"\"\"\nThis file contains LeNet-5 training script\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n#-------------------------------------------------------------------------------\n__author__ = \"Ando Ki\"\n__copyright__ = \"Copyright 2020 Ando Ki\"\n__credits__ = [\"none\", \"some\"]\n__license__ = \"The 2-Clause BSD License\"\n__version__ = \"0\"\n__revision__ = \"1\"\n__maintainer__ = \"Ando Ki\"\n__email__ = \"contact@future-ds.com\"\n__status__ = \"Development\"\n__date__ = \"2020.10.01\"\n__description__= \"LeNet-5 network model training script\"\n\n#-------------------------------------------------------------------------------\n# Note it saves parameter-only and model-included as swll.\n#-------------------------------------------------------------------------------\nimport argparse\nimport shutil\nimport os\nimport sys\n\nimport numpy as np\n\nimport torch\nfrom torchvision.datasets import mnist\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\n# Following causes AttributeError: 'SummaryWriter' object has no attribute 'export_scalars_to_json'\n#from torch.utils.tensorboard import SummaryWriter\nfrom tensorboardX import SummaryWriter\n\nfrom darknet_lenet5_utils import *\nfrom lenet5_model import Lenet5Model\n\n#-------------------------------------------------------------------------------\ndef get_dataset( args ):\n \"\"\"\n It prepares MNIST dataset.\n \"\"\"\n train_dataset = mnist.MNIST( root='dataset.train' # see train/MNIST/\n , train=True\n , download=True\n , transform=transforms.Compose([\n transforms.Resize((32, 32))\n ,transforms.Grayscale(num_output_channels=args.input_channels) # make 3 channels (does not work)\n ,transforms.ToTensor()]))\n test_dataset = mnist.MNIST( root='dataset.test'\n , train=False\n , download=True\n , transform=transforms.Compose([\n transforms.Resize((32, 32))\n ,transforms.Grayscale(num_output_channels=args.input_channels) # make 3 channels (does not work)\n ,transforms.ToTensor()]))\n train_loader = DataLoader( train_dataset\n , batch_size=args.batch_size\n , num_workers=8)\n test_loader = DataLoader( test_dataset\n , batch_size=args.batch_size\n , num_workers=8)\n return train_loader, test_loader\n\n#-------------------------------------------------------------------------------\ndef build_model( args ):\n \"\"\"\n It build LeNet-5 model and load checkpoint if specified.\n \"\"\"\n if args.pre_trained_type == 'none':\n model = Lenet5Model(args.input_channels)\n else:\n extension = os.path.splitext(args.pre_trained_weights)[1]\n if extension == '.pkl': # args.pre_trained_weights.endswitch('.pkl')\n model = torch.load(args.pre_trained_weights)\n elif extension == '.pth':\n if args.pre_trained_type == 'model':\n model = torch.load(args.pre_trained_weights)\n elif args.pre_trained_type == 'params':\n model = Lenet5Model(args.input_channels)\n model.load_state_dict(torch.load(args.pre_trained_weights))\n else:\n print(f\"Model type {args.pre_trained_type} not known\")\n return None, None, None\n elif extension == '.onnx':\n model = torch.onnx.load(args.pre_trained_weights)\n torch.onnix.checker.check_model(model)\n elif extension == '.weights':\n model = Lenet5Model(args.input_channels)\n load_weights(model, args.pre_trained_weights)\n else:\n print(\"un-known data file: \", args.pre_trained_weights);\n return 
None, None, None\n optimizer = SGD(model.parameters(), lr=args.learning_rate)\n cross_error = CrossEntropyLoss() # loss function\n return model, optimizer, cross_error\n\n#-------------------------------------------------------------------------------\ndef train_one_mini_batch( args\n , model\n , images # input images\n , labels # expected label for the input images\n , cross_error # error function\n , optimizer # otptimizer\n ):\n \"\"\"\n It runs a train on a mini-batch, which consists of a number of images.\n \"\"\"\n predicts = model(images.float())\n error = cross_error(predicts, labels.long()) # CrossEntropyLoss(calculated, expected)\n optimizer.zero_grad()\n error.backward() # loss\n optimizer.step()\n return error\n\n#-------------------------------------------------------------------------------\ndef evaluate_one_mini_batch( args\n , model\n , images # input images\n , labels # expected label\n ):\n \"\"\"\n It runs an evaluation on a mini-batch, which consists of a number of images.\n \"\"\"\n predicts = model(images.float()).detach()\n predicts_ys = np.argmax(predicts, axis=-1) # get id of max value\n matched = predicts_ys == labels\n correct = np.sum(matched.numpy(), axis=-1) # num of mached\n sum = matched.shape[0] # number of items (images) in the mini-batch\n return correct, sum\n\n#-------------------------------------------------------------------------------\ndef save_checkpoint( args\n , model\n , accuracy\n , epoch\n ):\n \"\"\"\n It ssave 'checkpoint' if required.\n It return 'True' for end-condition.\n \"\"\"\n if not hasattr(save_checkpoint, \"accuracy_old\"):\n save_checkpoint.accuracy_old = 0\n if accuracy>accuracy_old:\n torch.save(model, f\"{args.checkpoints}{os.sep}mnist_model_{accuracy:.3f}.pth\")\n torch.save(model.state_dict(), f\"{args.checkpoints}{os.sep}mnist_params_{accuracy:.3f}.pth\")\n dummy_input = torch.randn(1, args.input_channels, 32, 32, requires_grad=True)\n #batch_size, input_channel, input_height, input_width\n torch.onnx.export(model, dummy_input,\n f\"{args.checkpoints}{os.sep}mnist_model_{accuracy:.3f}.onnx\")\n if (not args.keep) and (f\"{accuracy_old:.3f}\" != f\"{accuracy:.3f}\"):\n pathX = f\"{args.checkpoints}{os.sep}mnist_model_{accuracy_old:.3f}.pth\"\n if os.path.exists(pathX): os.remove(pathX)\n pathX = f\"{args.checkpoints}{os.sep}mnist_params_{accuracy_old:.3f}.pth\"\n if os.path.exists(pathX): os.remove(pathX)\n pathX = f\"{args.checkpoints}{os.sep}mnist_model_{accuracy_old:.3f}.onnx\"\n if os.path.exists(pathX): os.remove(pathX)\n pathX = f\"{args.checkpoints}{os.sep}mnist_{accuracy_old:.3f}.weights\"\n if os.path.exists(pathX): os.remove(pathX)\n save_checkpoint.accuracy_old = accuracy\n \n if (float(accuracy)>=float(args.accuracy)):\n torch.save(model, f\"{args.checkpoints}{os.sep}mnist_model_final.pth\")\n torch.save(model.state_dict(), f\"{args.checkpoints}{os.sep}mnist_params_final.pth\")\n dummy_input = torch.randn(1, args.input_channels, 32, 32, requires_grad=False)\n torch.onnx.export(model, dummy_input, '{}/mnist_model_final.onnx'.format(args.checkpoints))\n save_weights(model, f\"{args.checkpoints}{os.sep}mnist_final.weights\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_model_final.pth\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_params_final.pth\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_model_final.onnx\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_final.weights\")\n return True\n elif epoch == (args.epochs-1):\n torch.save(model, f\"{args.checkpoints}{os.sep}mnist_model_last.pth\")\n 
torch.save(model.state_dict(), f\"{args.checkpoints}{os.sep}mnist_params_last.pth\")\n dummy_input = torch.randn(1, args.input_channels, 32, 32, requires_grad=False)\n torch.onnx.export(model, dummy_input, '{}/mnist_model_last.onnx'.format(args.checkpoints))\n save_weights(model, f\"{args.checkpoints}{os.sep}mnist_last.weights\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_model_last.pth\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_param_last.pth\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_model_last.onnx\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_last.weights\")\n return False\n\n#-------------------------------------------------------------------------------\nif __name__ == '__main__':\n def get_args():\n parser = argparse.ArgumentParser(description='PyTorch LeNet-5')\n parser.add_argument('-i', '--input_channels', type=int, default=1,\n metavar='input_channels',\n help='input channel size (default: 1)')\n parser.add_argument('-b', '--batch_size', type=int, default=100, # 60,000/100=600 iteration==> one epoch\n metavar='batch_size',\n help='input batch size (default: 100)')\n parser.add_argument('-e', '--epochs', type=int, default=100,\n metavar='epochs',\n help='number of epochs to train (default: 100)')\n parser.add_argument('-l', '--learning_rate', type=float, default=0.1,\n metavar='learning_rate',\n help='learning rate (default: 0.1)')\n parser.add_argument('-a', '--accuracy', type=float, default=0.99,\n metavar='accuracy',\n help='accuracy (default: 0.99)')\n parser.add_argument('-c', '--checkpoints', type=str, default=\"checkpoints\",\n metavar='checkpoints',\n help='directory name for checkpoint (default: checkpoints)')\n parser.add_argument('-t', '--pre_trained_type', type=str\n ,choices=[\"params\", \"model\", \"weights\", \"none\"]\n ,default=\"none\"\n ,metavar='type'\n ,help='type of pre-trained weights: \\\"model\\\", \\\"params\\\", \\\"weights\\\", or \\\"none\\\" (default: \\\"model\\\")')\n parser.add_argument('-w', '--pre_trained_weights', type=str, default=\"checkpoints/mnist_params_final.pth\",\n metavar='file_name',\n help=\"pre-trained weight or model path_file_name for checkpoint when '--type' is not none (default: checkpoints/mnist_final.pth)\")\n parser.add_argument('-g', '--logdir', type=str, default=\"tensorboard\",\n metavar='logdir',\n help='directory name for log (default: tensorboard)')\n parser.add_argument('-k', '--keep', action='store_true',\n help='make keep intermediate weights (default: False)')\n parser.add_argument('-r', '--rigor', action='store_true',\n help='set rigor (default: False)')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='make verbose (default: False)')\n parser.add_argument('-d', '--debug', action='store_true',\n help='make debug (default: False)')\n args = parser.parse_args()\n return args\n\n#-------------------------------------------------------------------------------\nif __name__ == '__main__':\n args = get_args()\n\n if not os.path.exists(args.checkpoints): os.makedirs(args.checkpoints)\n\n train_loader, test_loader = get_dataset( args )\n model, optimizer, cross_error = build_model( args )\n model.train() # let the model know it is training, i.e., it sets the mode to train\n\n if args.debug:\n items = list(model.__dict__['_modules'].items())[0][1]\n print(f\"{items[0].__class__.__name__}.bias.data={items[0].bias.data}\")\n\n if os.path.isdir(args.logdir): shutil.rmtree(args.logdir)\n os.makedirs(args.logdir)\n log = SummaryWriter(args.logdir)\n log.add_graph(model, 
torch.rand(args.batch_size, args.input_channels, 32, 32))\n\n if args.debug:\n print(f\"{items[0].__class__.__name__}.bias.data={items[0].bias.data}\")\n\n if args.verbose:\n # Print model and optimizer and cross_error\n print(model)\n print(model.__dict__['_modules'])\n print(optimizer)\n print(cross_error)\n # Print model's state_dict\n print(\"Model's state_dict:\")\n for param_tensor in model.state_dict():\n print(param_tensor, \"\\t\", model.state_dict()[param_tensor].size())\n\n # Print optimizer's state_dict\n print(\"Optimizer's state_dict:\")\n for var_name in optimizer.state_dict():\n print(var_name, \"\\t\", optimizer.state_dict()[var_name])\n\n accuracy_old=0\n for epoch in range(args.epochs):\n for idx, (train_x, train_label) in enumerate(train_loader):\n # idx: 0 to (num of mini-batches 600,000/100 -1 )[0:599]\n # train_x: 100 images of size 32x32\n # train_label: 100 elements\n model.train() # set the mode to train\n error = train_one_mini_batch(args, model, train_x, train_label, cross_error, optimizer)\n if idx % (args.batch_size) == 0: # print error after each batch\n print('idx: {}, error: {}'.format(idx, error))\n\n correct = 0\n sum = 0\n for idx, (test_x, test_label) in enumerate(test_loader):\n model.eval() # set the mode to evaluate (not to train)\n c, s = evaluate_one_mini_batch(args, model, test_x, test_label)\n correct += c # accumulate the num of mached\n sum += s # accumulate the number of items\n accuracy = correct/sum # ratio of correct from sum\n print(f\"epoch: {epoch}, accuracy: {accuracy}\")\n print(\"----------------------------------\")\n\n log.add_scalar('Train/accuracy', accuracy, epoch)\n log.add_scalar('Train/error', error, epoch)\n\n if save_checkpoint(args, model, accuracy, epoch):\n break\n\n log.export_scalars_to_json(args.logdir + os.sep + \"all_logs.json\")\n log.close()\n\n#===============================================================================\n# Revision history:\n#\n# 2020.10.01: Started by Ando Ki (adki@future-ds.com)\n#===============================================================================\n","repo_name":"adki/DLR_Projects","sub_path":"LeNet-5/LeNet-5.pytorch/src/lenet5_train.py","file_name":"lenet5_train.py","file_ext":"py","file_size_in_byte":14936,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}
+{"seq_id":"14036220159","text":"from astral import LocationInfo\nfrom astral.sun import sun\nimport datetime\nimport pyowm\nimport time\n# Libraries needed to run this class.\n\nowm = pyowm.OWM(\"fc1d78e5bdb6e3e613600e91faa22e79\")\nmgr = owm.weather_manager()\n# Stuff needed for OpenWeatherMap to work, including a subscription code.\n\n\nclass WeatherService:\n def __init__(self, latitude, longitude, unix):\n self.lat = latitude\n self.lon = longitude\n self.unix = int(unix)\n # Class is asking for a latitude, longitude, and unix timestamp for the ISS's flyover.\n\n self.now = time.time()\n self.hours = int((self.unix - self.now) / 3600)\n self.days = int((self.unix - self.now) / 86400)\n # Hours and days from now.\n\n self.year = int(datetime.datetime.fromtimestamp(self.unix).strftime(\"%Y\"))\n self.month = int(datetime.datetime.fromtimestamp(self.unix).strftime(\"%m\"))\n self.day = int(datetime.datetime.fromtimestamp(self.unix).strftime(\"%d\"))\n self.minute = int(datetime.datetime.fromtimestamp(self.unix).strftime(\"%M\"))\n self.hour = int(datetime.datetime.fromtimestamp(self.unix).strftime(\"%H\"))\n if self.minute >= 30:\n self.hour += 1\n # The year, month and day of the inputted unix timestamp.\n\n self.one_call = mgr.one_call(lat=self.lat, lon=self.lon)\n self.cloudiness = int()\n # Location and cloud stuff for using OpenWeatherMap with PyOWM.\n\n self.loc = LocationInfo(timezone=\"Europe/London\", latitude=self.lat, longitude=self.lon)\n self.s = sun(self.loc.observer, date=datetime.date(self.year, self.month, self.day))\n # Location info stuff needed to find out when the sun goes up and down (thank you StackOverflow).\n self.sunup = str()\n self.sundown = str()\n self.uphour = int()\n self.downhour = int()\n # Sun up/down stuff.\n\n self.visible = False\n # Making the assumption that the night sky isn't visible.\n\n def clearsky(self):\n if self.hours < 0:\n return \"Error: Negative timestamp\"\n # If the unix timestamp is in the past.\n\n elif 0 <= self.hours <= 47:\n self.cloudiness = int(self.one_call.forecast_hourly[self.hours].clouds)\n # If the unix timestamp is within 48 hours from now, we can get an hourly cloud check.\n\n elif self.days <= 6:\n self.cloudiness = int(self.one_call.forecast_daily[self.days].clouds)\n # If the unix timestamp is greater than 48 hours from now, we can get daily cloud checks for up to 7 days from\n # now.\n\n else:\n return \"Error: Timestamp too far into the future\"\n # We can't get any weather data further than a week from now.\n\n self.sunup = str(self.s[\"sunrise\"]).split(\" \")[1].split(\".\")[0]\n self.sundown = str(self.s[\"sunset\"]).split(\" \")[1].split(\".\")[0]\n # Getting the time (hours:minutes:seconds) of sunset and sunrise on the day of the unix timestamp.\n\n self.uphour = int(self.sunup.split(\":\")[0])\n if int(self.sunup.split(\":\")[1]) >= 30:\n self.uphour += 1\n # Getting the time of sunrise rounded to nearest hour.\n\n self.downhour = int(self.sundown.split(\":\")[0])\n if int(self.sundown.split(\":\")[1]) >= 30:\n self.downhour += 1\n # Getting the time of sunset rounded to nearest hour.\n\n if self.cloudiness < 25 and (self.uphour > self.hour or self.downhour < self.hour):\n self.visible = True\n # If there is less than 25% clouds and the unix time is before sunrise or after sunset.\n\n return self.visible\n # Return a bool. 
True if the night sky is visible.\n\n\naarhus = WeatherService(56.158150, 10.212030, 1644500918)\n\nprint(aarhus.clearsky())\n","repo_name":"emiln2002/ISS_tracker","sub_path":"WeatherService.py","file_name":"WeatherService.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
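A worked sketch of the forecast-window arithmetic in clearsky (values illustrative):

    import time

    now = time.time()
    unix = int(now) + 90_000           # flyover about 25 hours away
    hours = int((unix - now) / 3600)   # ~25 -> hourly forecast, valid for 0..47
    days = int((unix - now) / 86400)   # 1   -> daily forecast once hours > 47, up to 6 days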
+{"seq_id":"1711401337","text":"# test.py - a test build of a new and improved terminal RPG engine\r\n'''\r\n Implement TODO:\r\n -local multiplayer combat isn't working. Sockets are hard, who knew?\r\n Probably won't get finished since it's a huge hassle for such little\r\n return but it was close! It worked locally at least, so that's cool. \r\n I'll probably keep the local element just for demonstration purposes.\r\n'''\r\nfrom msvcrt import getch\r\nimport sys\r\nimport os\r\nimport socket\r\nimport textwrap\r\nimport random\r\nimport ast\r\n\r\nimport faces\r\nfrom client import clientData\r\nfrom data import mapData\r\nfrom data import playerData\r\nfrom data import enemyData\r\nfrom data import enemyClass\r\nfrom maps import gamePlayMaps\r\nfrom maps import staticMaps\r\nfrom mapChange import changeMap\r\nimport subprocess\r\n\r\n\r\n\r\n## Runs game opening text of story and controls\r\nimport open_text\r\n\r\n\r\n\r\n\r\n####################################\r\n##### GLOBAL DATA #####\r\n####################################\r\n\r\nclass globalStates():\r\n\r\n ## gold won in combat to be printed later\r\n won_gold = 0\r\n \r\n # Gameplay map currently being rendered\r\n current_map = gamePlayMaps.map1\r\n \r\n # Gameplay commands used from player input for map - first assigned at runtime\r\n current_commands = None\r\n \r\n # Map to return to from pause menu\r\n return_map = None\r\n\r\n ## List of static maps for referencing in play loop\r\n static_maps = [staticMaps.menu, staticMaps.console_menu, \r\n staticMaps.shop_menu, staticMaps.upgrade_menu,\r\n staticMaps.multiplayer_menu]\r\n combat_maps = [staticMaps.combat_map_enemy, staticMaps.combat_map_pl]\r\n\r\n\r\n\r\n####################################\r\n##### MAP FUNCTIONS #####\r\n####################################\r\n\r\ndef overwrite_map(w_map):\r\n \r\n for i in range(len(w_map)):\r\n for j in range(len(w_map[0])):\r\n if w_map[i][j] == 'X':\r\n w_map[i][j] = ' '\r\n\r\n\r\ndef print_map(p_map):\r\n\r\n ## Print player data at top of map\r\n print(f'\\n HP: {str(playerData.HP)}/{str(playerData.MAX_HP)} G: {str(playerData.GOLD)}')\r\n\r\n ## Print the Map\r\n for i in range(len(p_map)):\r\n for j in range(len(p_map[0])):\r\n print(p_map[i][j], end='')\r\n print()\r\n\r\n\r\ndef print_combat_screen(turn):\r\n\r\n ## Set map to players turn\r\n if turn == 1 or turn == 0:\r\n globalStates.current_map = staticMaps.combat_map_pl\r\n if turn == 2:\r\n globalStates.current_map = staticMaps.combat_map_enemy\r\n ## Print the map\r\n print(globalStates.current_map)\r\n\r\n\r\n ## print combatants health\r\n print(f' HP: {playerData.HP}/{playerData.MAX_HP} {enemyData.current_enemy.rank} {enemyData.current_enemy.name}: {enemyData.current_enemy.hp}')\r\n if turn == 0:\r\n print(f'\\n\\t[1] Laser Cannon\\n\\t[2] Missile ({str(playerData.MISSILES)})\\n\\t[3] Repairkit ({str(playerData.REPAIRKITS)})')\r\n\r\n\r\n\r\n\r\n###################################\r\n###### DIALOGUE / CONVO ######\r\n##################################\r\n\r\ndef dialoguePrompt(face, messages):\r\n print_face = face\r\n print_message = random.choice(messages)\r\n\r\n clear()\r\n print(f'\\t\\n {enemyData.current_enemy.rank} {enemyData.current_enemy.name} says:')\r\n print(random.choice(print_face))\r\n text = textwrap.wrap(print_message, 25)\r\n for j in range(len(text)):\r\n print(text[j].center(36))\r\n getch()\r\n\r\n\r\n# ### cut ###\r\n# def speakableList(cur_map):\r\n\r\n# ##### MAP 2 #####\r\n# ## GIRL\r\n# if cur_map == gamePlayMaps.map2:\r\n# if 
(cur_map[mapData.pos_x][mapData.pos_y+1] == 'Q') or \\\r\n# (cur_map[mapData.pos_x][mapData.pos_y-1] == 'Q'):\r\n# dialoguePrompt(faces.girl.face, faces.girl.messages)\r\n\r\n\r\n\r\n\r\n\r\n##################################################\r\n###### FUNCTIONALITY AND GAMEPLAY ######\r\n##################################################\r\n\r\ndef clear():\r\n os.system('cls')\r\n\r\n\r\n\r\n################################################\r\n####### INPUT AND COMMANDS #######\r\n################################################\r\n\r\n\r\n## This is the bulk of movement control. This command-function holds the main inputs\r\n# by players and does collision detection before making the move. If the move is through\r\n# a door, this is handled by the if/else blocks inside the cmd='' blocks. It basically\r\n# checks if the move is going to be into a door, and if it is, it returns True. This is\r\n# returned in the main loop, where a true return results in that info being sent to another\r\n# function to load the map in. \r\ndef map_commands(cmd, cur_map):\r\n \r\n if cmd == 'w' and cur_map[mapData.pos_x - 1][mapData.pos_y] not in mapData.walls:\r\n if cur_map[mapData.pos_x - 1][mapData.pos_y] in mapData.doors:\r\n return True\r\n else:\r\n mapData.pos_x -= 1\r\n \r\n elif cmd == 's' and cur_map[mapData.pos_x + 1][mapData.pos_y] not in mapData.walls:\r\n if cur_map[mapData.pos_x + 1][mapData.pos_y] in mapData.doors:\r\n return True\r\n else:\r\n mapData.pos_x += 1\r\n \r\n elif cmd == 'a' and cur_map[mapData.pos_x][mapData.pos_y - 1] not in mapData.walls:\r\n if cur_map[mapData.pos_x][mapData.pos_y - 1] in mapData.doors:\r\n return True\r\n else:\r\n mapData.pos_y -= 2\r\n \r\n elif cmd == 'd' and cur_map[mapData.pos_x][mapData.pos_y + 1] not in mapData.walls:\r\n if cur_map[mapData.pos_x][mapData.pos_y + 1] in mapData.doors:\r\n return True\r\n else:\r\n mapData.pos_y += 2\r\n \r\n\r\n ## exit command\r\n elif cmd == '\\x1b':\r\n clear()\r\n print('\\n\\n > EXIT? 
[ENTER] TO CONFIRM')\r\n confirmation = bytes.decode(getch())\r\n\r\n if confirmation == '\\r':\r\n sys.exit(0)\r\n else:\r\n pass\r\n \r\n\r\n # ## Enter on command console\r\n elif cmd == '\\r' and tuple((mapData.pos_x, mapData.pos_y)) in mapData.command_console_positions:\r\n globalStates.current_commands = console_commands\r\n globalStates.return_map = globalStates.current_map\r\n globalStates.current_map = staticMaps.console_menu\r\n\r\n ## shop console\r\n elif cmd == '\\r' and tuple((mapData.pos_x, mapData.pos_y)) in mapData.shop_console_positions:\r\n globalStates.current_commands = shop_commands\r\n globalStates.return_map = globalStates.current_map\r\n globalStates.current_map = staticMaps.shop_menu\r\n\r\n ## upgrade console\r\n elif cmd == '\\r' and tuple((mapData.pos_x, mapData.pos_y)) in mapData.upgrade_console_positions:\r\n globalStates.current_commands = upgrade_commands\r\n globalStates.return_map = globalStates.current_map\r\n globalStates.current_map = staticMaps.upgrade_menu\r\n\r\n\r\n\r\n \r\n\r\ndef console_commands(cmd, cur_map):\r\n \r\n ## Combat\r\n if cmd == '1': \r\n\r\n ### initialize enemy to fight ###\r\n ## Random rank var\r\n rank = random.choice(enemyData.ranks)\r\n \r\n ## Random name var\r\n name = random.choice(enemyData.names)\r\n\r\n\r\n ## randomize hp and basedam \r\n ## These are calculated based on the players current stat level\r\n ## --prone to tweaking, balance is still in progress\r\n en_hp = random.randint(int(round(playerData.STATS / 5)), int(round(playerData.STATS / 2.5))) + 6\r\n en_basedam = random.randint(int(round(playerData.STATS / 25)), int(round(playerData.STATS / 18)))\r\n \r\n ## if the base damage is rounded down to 0, set to 1- enemeies need to always do damage\r\n if en_basedam < 1:\r\n en_basedam = 1\r\n\r\n \r\n ## initialize randomized enemy instance, assign its above info and add the face and messages\r\n enemyData.current_enemy = enemyClass(rank, name, en_hp, en_basedam, faces.shopkeeper.face, faces.shopkeeper.messages)\r\n \r\n ## enemy speaks before combat\r\n dialoguePrompt(enemyData.current_enemy.face, enemyData.current_enemy.messages)\r\n\r\n ## combat map/cmds\r\n globalStates.current_commands = combat_commands\r\n globalStates.current_map = staticMaps.combat_map_enemy\r\n\r\n # player gold reward for victory assigned beforehand so sue me. 
if you die you lose anyway \r\n won_gold = int(round(enemyData.current_enemy.hp / 2)) + random.randint(2, 8)\r\n playerData.STATS += int(round(enemyData.current_enemy.hp / 8)) ### maybe just make it like 2 or 3 \r\n playerData.GOLD += won_gold\r\n globalStates.won_gold = won_gold\r\n\r\n\r\n ## goto SOCKET MULTIPLAYER\r\n elif cmd == '2': \r\n globalStates.current_map = staticMaps.multiplayer_menu\r\n globalStates.current_commands = multiplayer_lobby_commands\r\n \r\n \r\n ## SAVE \r\n elif cmd == '3':\r\n clear()\r\n save_game()\r\n\r\n ## LOAD\r\n elif cmd == '4':\r\n clear()\r\n load_game()\r\n\r\n else:\r\n globalStates.current_map = globalStates.return_map\r\n globalStates.current_commands = map_commands\r\n\r\n\r\ndef shop_commands(cmd, cur_map):\r\n\r\n gp = playerData.GOLD\r\n no_gp = '\\n ** NOT ENOUGH GOLD **'\r\n\r\n if cmd == '1':\r\n if gp >= 10:\r\n playerData.REPAIRKITS += 1\r\n print('\\n +REPAIRKIT PURCHASED+')\r\n\r\n playerData.GOLD -= 10\r\n playerData.STATS += 1\r\n getch()\r\n\r\n else:\r\n print(no_gp)\r\n getch()\r\n\r\n\r\n elif cmd == '2':\r\n if gp >= 10:\r\n playerData.MISSILES += 1\r\n print('\\n +MISSILE PURCHASED+')\r\n \r\n playerData.GOLD -= 10\r\n playerData.STATS += 1\r\n getch()\r\n\r\n else:\r\n print(no_gp)\r\n getch()\r\n\r\n else:\r\n globalStates.current_commands = map_commands\r\n globalStates.current_map = globalStates.return_map\r\n\r\n\r\ndef upgrade_commands(cmd, cur_map):\r\n\r\n gp = playerData.GOLD\r\n no_gp = '\\n ** NOT ENOUGH GOLD **'\r\n \r\n if cmd == '1':\r\n if gp >= 20:\r\n playerData.CANNON_DAM += 1\r\n print('\\n +CANNON UPGRADED +')\r\n\r\n playerData.GOLD -= 20\r\n playerData.STATS += 1\r\n getch()\r\n\r\n else:\r\n print(no_gp)\r\n getch()\r\n\r\n\r\n elif cmd == '2':\r\n if gp >= 15:\r\n playerData.MAX_HP += 5\r\n playerData.HP = playerData.MAX_HP\r\n print('\\n +SHIELD UPGRADED +')\r\n\r\n playerData.GOLD -= 15\r\n playerData.STATS += 1\r\n getch()\r\n\r\n else:\r\n print(no_gp)\r\n getch()\r\n\r\n else:\r\n globalStates.current_commands = map_commands\r\n globalStates.current_map = globalStates.return_map\r\n\r\n\r\n\r\ndef multiplayer_lobby_commands(cmd, cur_map):\r\n \r\n ## JOIN SERVER\r\n if cmd == '1':\r\n clear()\r\n\r\n ## input port #\r\n port = input('\\n\\n\\t!-SAVE YOUR GAME BEFORE CONNECTING\\n\\n\\t>> ENTER SERVERS PORT NUMBER\\n\\t>> ')\r\n \r\n if port == '':\r\n pass\r\n \r\n ## if port is too high or low, return to menu\r\n elif int(port) > 65000 or int(port) < 6000:\r\n print('\\n\\t>> THIS PORT NUMBER IS OUT OF VALID RANGE <<')\r\n getch()\r\n\r\n ## try to validate port and connect\r\n else:\r\n clear()\r\n connected = False\r\n \r\n HOST = '127.0.0.1'\r\n \r\n clientData.PORT = int(port)\r\n CLIENT = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n ## Attempt connection. 
If refused or failed to connect, report to player\r\n try:\r\n CLIENT.connect((HOST, int(port)))\r\n connected = True\r\n\r\n except ConnectionRefusedError:\r\n clear()\r\n print('\\n\\n\\t>> CONNECTION REFUSED <<\\n\\n\\t port may be invalid')\r\n getch()\r\n \r\n if connected:\r\n \r\n ## assign player variables to vars in client data\r\n clientData.PL_HP = playerData.HP\r\n clientData.PL_MAX_HP = playerData.MAX_HP\r\n clientData.PL_DAM = playerData.CANNON_DAM\r\n clientData.PL_MISSILES = playerData.MISSILES\r\n clientData.PL_REPAIRKITS = playerData.REPAIRKITS\r\n \r\n ## Connect to server and send player stats\r\n try:\r\n clientData.client_connect(CLIENT)\r\n clientData.send_stats(CLIENT)\r\n\r\n except ConnectionResetError:\r\n print('\\n\\t>> CONNECTION LOST\\n\\t>> SERVER MAY HAVE CRASHED OR BEEN CLOSED')\r\n getch()\r\n return\r\n\r\n ## run game loop\r\n loot_plus = clientData.main_loop(CLIENT)\r\n\r\n ## Get loot\r\n playerData.GOLD += loot_plus\r\n\r\n getch()\r\n\r\n ## run client commands, let client.py take over here\r\n\r\n\r\n ## RUN SERVER ##\r\n elif cmd == '2':\r\n\r\n clear()\r\n\r\n ## print port data and help\r\n print('''\r\n [*] PORT MUST BE BELOW 65000 AND OVER 6000.\r\n IF PORT IS VALID, SERVER WILL OPEN IN A \r\n NEW TERMINAL WINDOW AND RUN IN THE BACK-\r\n GROUND. AFTER CREATING SERVER, BOTH THE\r\n PLAYERS MUST CONNECT FROM PREVIOUS MENU. \r\n ''')\r\n\r\n ## get port #\r\n port = input('\\n\\n\\t>> ENTER A VALID PORT NUMBER\\n\\t>> ')\r\n\r\n ## break by entering nothing\r\n if port == '':\r\n pass\r\n \r\n ## if port is too high or low, return to menu\r\n elif int(port) > 65000 or int(port) < 6000:\r\n print('\\n\\t>> THIS PORT NUMBER IS OUT OF VALID RANGE <<')\r\n getch()\r\n\r\n ## otherwise run server with port num as cmdline argument\r\n else:\r\n ## get current dir for filepath to run server\r\n dir_path = os.getcwd()\r\n ## run server. start is the command, cmd is the window, /K means open and keep it open\r\n os.system('start cmd /K ' + dir_path + '\\\\server.py ' + port)\r\n\r\n else:\r\n globalStates.current_commands = console_commands\r\n globalStates.current_map = staticMaps.console_menu\r\n\r\n\r\n\r\ndef combat_commands(cmd, cur_map):\r\n\r\n turn_completed = False\r\n\r\n ## generate random number for critical roll. If 1, player misses attack. 
If 20, player does heavier critical attack\r\n    critical_generator = random.randint(1,20)\r\n\r\n    ## Cannon ##\r\n    if cmd == '1':\r\n\r\n        if critical_generator == 1:\r\n\r\n            clear()\r\n            print_combat_screen(1)\r\n            print(f'\\n\\tYour Attack MISSES')\r\n            getch()\r\n\r\n        elif critical_generator == 20:\r\n            \r\n            ## Generate random critical damage amount, from 1.5x to 2.5x player cannon damage plus a small bonus\r\n            crit_damage = random.randint(int(round(playerData.CANNON_DAM * 1.5)), int(round(playerData.CANNON_DAM * 2.5))) + random.randint(1,4)\r\n\r\n            ## Remove damage amt from enemy HP\r\n            enemyData.current_enemy.hp -= crit_damage\r\n\r\n            # Check if enemy health is less than 0, and set it to 0 if so- no Negative HP on screen!\r\n            if enemyData.current_enemy.hp < 0:\r\n                enemyData.current_enemy.hp = 0\r\n\r\n            clear()\r\n\r\n            # Update screen with damage results\r\n            print_combat_screen(1)\r\n            print(f'\\n\\tCritical Cannon hit for {str(crit_damage)} dam!')\r\n            getch()\r\n\r\n        else:\r\n            # Generate randomized damage number from player base damage and a +4 upper range\r\n            damage = random.randint(playerData.CANNON_DAM, playerData.CANNON_DAM + 4)\r\n\r\n            # Remove damage number from enemy HP\r\n            enemyData.current_enemy.hp -= damage\r\n\r\n            # Check if enemy health is less than 0, and set it to 0 if so- no Negative HP on screen!\r\n            if enemyData.current_enemy.hp < 0:\r\n                enemyData.current_enemy.hp = 0\r\n\r\n            clear()\r\n\r\n            # Update screen with damage results\r\n            print_combat_screen(1)\r\n            print(f'\\n\\tCannon does {str(damage)} dam')\r\n            getch()\r\n\r\n        # Continue to enemy turn\r\n        turn_completed = True\r\n\r\n\r\n    ## Missiles ##\r\n    elif cmd == '2':\r\n\r\n        # Use missile if available\r\n        if playerData.MISSILES > 0:\r\n            \r\n            ## subtract missile from inventory\r\n            playerData.MISSILES -= 1\r\n\r\n\r\n            if critical_generator == 1:\r\n\r\n                print_combat_screen(1)\r\n                print(f'\\n\\tYour Missile MISSES')\r\n                getch()\r\n\r\n            elif critical_generator == 20:\r\n                \r\n                ## Generate random critical damage amount, from 2.5x to 4.5x player cannon damage plus a larger bonus\r\n                crit_damage = random.randint(int(round(playerData.CANNON_DAM * 2.5)), int(round(playerData.CANNON_DAM * 4.5))) + random.randint(3,8)\r\n\r\n                ## Remove damage amt from enemy HP\r\n                enemyData.current_enemy.hp -= crit_damage\r\n\r\n                # Check if enemy health is less than 0, and set it to 0 if so- no Negative HP on screen!\r\n                if enemyData.current_enemy.hp < 0:\r\n                    enemyData.current_enemy.hp = 0\r\n\r\n                clear()\r\n\r\n                # Update screen with damage results\r\n                print_combat_screen(1)\r\n                print(f'\\n\\tCritical Missile hit for {str(crit_damage)} dam!')\r\n                getch()\r\n                \r\n            else:\r\n                ## Generate random number. Missile damage starts at +4 player basedamage and goes up to +9 added.
Powerful shit.\r\n damage = random.randint(playerData.CANNON_DAM + 4, playerData.CANNON_DAM + 9)\r\n\r\n # Subtract missile damage from enemy HP\r\n enemyData.current_enemy.hp -= damage\r\n\r\n # No negative HP on screen.\r\n if enemyData.current_enemy.hp < 0:\r\n enemyData.current_enemy.hp = 0\r\n \r\n clear()\r\n\r\n ## Print damage report to screen\r\n print_combat_screen(1)\r\n print(f'\\n\\tMissile does {str(damage)} dam')\r\n getch()\r\n\r\n # Continue to enemy turn\r\n turn_completed = True\r\n \r\n # Otherwise, too bad sonnyboy.\r\n else:\r\n print('\\t** NO MISSILES **')\r\n getch()\r\n\r\n\r\n ## Repair ##\r\n elif cmd == '3':\r\n\r\n # Check for repairkit in inventory and that your health isn't maxed already.\r\n if playerData.REPAIRKITS > 0 and playerData.HP == playerData.MAX_HP:\r\n print('\\t** HEALTH MAXIMUM **')\r\n getch()\r\n\r\n # Otherwise, go through with repair-\r\n elif playerData.REPAIRKITS > 0:\r\n \r\n # Remove RKit from inventory\r\n playerData.REPAIRKITS -= 1\r\n\r\n ## calculate repairkit amount\r\n repair_amount = 10 + int(round(playerData.STATS * .08))\r\n\r\n ## If player health plus repairkit is more than their max hp, just make it equal max hp\r\n if (playerData.HP + repair_amount) > playerData.MAX_HP:\r\n playerData.HP = playerData.MAX_HP\r\n \r\n ## Otherwise add the repair amount to their HP\r\n else:\r\n playerData.HP += repair_amount\r\n\r\n clear()\r\n\r\n ## Update screen with repair data \r\n print_combat_screen(1)\r\n print(f'\\n\\tShip Repaired +{str(repair_amount)} hp')\r\n getch()\r\n \r\n ## End Turn\r\n turn_completed = True\r\n\r\n else:\r\n print('\\t** NO REPAIRKITS **')\r\n getch()\r\n\r\n else:\r\n pass\r\n\r\n ## Once turn is completed and the enemy isn't dead, go to enemy turn\r\n if turn_completed and enemyData.current_enemy.hp > 0:\r\n enemyTurn()\r\n\r\n\r\ndef enemyTurn():\r\n\r\n clear()\r\n\r\n ## Generate critical chances\r\n critical_generator = random.randint(1,17)\r\n\r\n ## Check for miss or crit hit\r\n if critical_generator == 1:\r\n print_combat_screen(2)\r\n print(f'\\n\\t{enemyData.current_enemy.name} MISSES!')\r\n getch()\r\n \r\n elif critical_generator == 2:\r\n\r\n crit_damage = random.randint(int(round(enemyData.current_enemy.basedam * 1.5)), int(round(enemyData.current_enemy.basedam * 2.5))) + 3\r\n\r\n ## Remove damage amount from player HP\r\n playerData.HP -= crit_damage\r\n\r\n ## No negative HP on screen\r\n if playerData.HP < 0:\r\n playerData.HP = 0\r\n \r\n ## Update screen with damage results\r\n print_combat_screen(2)\r\n print(f'\\n\\t{enemyData.current_enemy.name} CRITICALLY attacks for -{str(crit_damage)} dam')\r\n getch()\r\n\r\n else:\r\n\r\n ## Damage range for enemy based on their base damage with a +3 range\r\n player_dam = random.randint(enemyData.current_enemy.basedam, enemyData.current_enemy.basedam + 3)\r\n \r\n ## Remove damage amount from player HP\r\n playerData.HP -= player_dam\r\n\r\n ## No negative HP on screen\r\n if playerData.HP < 0:\r\n playerData.HP = 0\r\n \r\n ## Update screen with damage results\r\n print_combat_screen(2)\r\n print(f'\\n\\t{enemyData.current_enemy.name} attacks for -{str(player_dam)} dam')\r\n getch()\r\n\r\n ## Death screen if killed. 
If you see this stuff in game, do better next time I guess.\r\n if playerData.HP <= 0:\r\n clear()\r\n ## Change lol\r\n print('\\n\\n\\n\\tYOU HAVE DIED\\n\\n\\tsucks to fuckin suck fuckeroni')\r\n getch()\r\n sys.exit()\r\n\r\n\r\n\r\n####################################\r\n##### SAVE / LOAD #####\r\n####################################\r\n\r\ndef save_game():\r\n ## SAVE ORDER: xpos, ypos, current_map, pl_hp, pl_gold, missiles, maxhp, stats, repairkits, candam\r\n save_xpos = str(mapData.pos_x)\r\n save_ypos = str(mapData.pos_y)\r\n save_cur_map = str(mapData.load_map_list.index(globalStates.return_map))\r\n save_hp = str(playerData.HP)\r\n save_gold = str(playerData.GOLD)\r\n save_missiles = str(playerData.MISSILES)\r\n save_max_hp = str(playerData.MAX_HP)\r\n save_stats = str(playerData.STATS)\r\n save_repairkits = str(playerData.REPAIRKITS)\r\n save_cannon_dam = str(playerData.CANNON_DAM)\r\n\r\n\r\n clear()\r\n ## Print save files and ask for file input \r\n saveFiles = []\r\n for filename in os.listdir('.'):\r\n if filename.startswith('savegame'):\r\n saveFiles.append(filename)\r\n print('\\n --SAVE AS:')\r\n print(' [1] New Save')\r\n\r\n i = 2\r\n for file in saveFiles:\r\n print(' ['+str(i)+'] '+file)\r\n i +=1\r\n print('\\n ['+str(i)+'] Back')\r\n\r\n fileChoice = bytes.decode(getch())\r\n\r\n ## Name new save file\r\n invalidChars = ['?', '\\\\', '/', ':', '\"', '<', '>', '*']\r\n if fileChoice == str(i):\r\n return\r\n else:\r\n if fileChoice == '1':\r\n ## CHECK IF 7+ save files already\r\n if len(saveFiles) >= 7:\r\n print('\\n MAXIMUM NUMBER OF SAVE FILES REACHED')\r\n getch()\r\n return \r\n else:\r\n clear()\r\n print(' ENTER A NAME FOR FILE:')\r\n name = input(' > ')\r\n for i in invalidChars:\r\n if i in name:\r\n print('\\n #INVALID CHARACTERS IN NAME#')\r\n input()\r\n return\r\n openfile = open('savegame_'+name+'.txt', 'w+')\r\n \r\n ## Try to open and rewrite save file\r\n else:\r\n try:\r\n openfile = open(saveFiles[int(fileChoice)-2], 'w+')\r\n except (ValueError, IndexError):\r\n print('> Unable to save to this file')\r\n getch()\r\n return\r\n\r\n\r\n ## WRITE DATA TO FILE\r\n ## SAVE ORDER: xpos, ypos, current_map, pl_hp, pl_gold missiles, maxhp, stats\r\n openfile.truncate()\r\n openfile.write(save_xpos); openfile.write(\"\\n\")\r\n openfile.write(save_ypos); openfile.write(\"\\n\")\r\n openfile.write(save_cur_map); openfile.write(\"\\n\")\r\n openfile.write(save_hp); openfile.write(\"\\n\")\r\n openfile.write(save_gold); openfile.write(\"\\n\")\r\n openfile.write(save_missiles); openfile.write(\"\\n\")\r\n openfile.write(save_max_hp); openfile.write(\"\\n\")\r\n openfile.write(save_stats); openfile.write(\"\\n\")\r\n openfile.write(save_repairkits) ; openfile.write(\"\\n\")\r\n openfile.write(save_cannon_dam) ; openfile.write(\"\\n\") \r\n\r\n print(f'\\n ** GAME SAVED **')\r\n getch()\r\n \r\n\r\ndef load_game():\r\n clear()\r\n\r\n ## List out all saved games in current directory\r\n saved_files_list = []\r\n for savefile in os.listdir('.'):\r\n if savefile.startswith('savegame'):\r\n saved_files_list.append(savefile)\r\n \r\n ## Print the saved files\r\n print('\\n -- SAVE FILES --')\r\n \r\n ## index numb for printing and selection\r\n i = 1\r\n for file in saved_files_list:\r\n print(' ['+str(i)+'] '+file)\r\n i+=1\r\n\r\n print('\\n ['+str(i)+'] BACK')\r\n \r\n file_choice = bytes.decode(getch())\r\n\r\n if file_choice == str(i):\r\n return\r\n else:\r\n try:\r\n load_file = open(saved_files_list[int(file_choice)-1])\r\n except (ValueError, 
IndexError):\r\n            return\r\n        \r\n        try:\r\n            ## LOAD DATA FROM FILE\r\n            ## ORDER: xpos, ypos, current_map, pl_hp, pl_gold, missiles, maxhp, stats, repairkits, candam\r\n            load_xpos = int(load_file.readline())\r\n            load_ypos = int(load_file.readline())\r\n            load_cur_map = int(load_file.readline())\r\n            load_pl_hp = int(load_file.readline())\r\n            load_pl_gold = int(load_file.readline())\r\n            load_pl_missiles = int(load_file.readline())\r\n            load_pl_max_hp = int(load_file.readline())\r\n            load_pl_stats = int(load_file.readline())\r\n            load_pl_repairkits = int(load_file.readline())\r\n            load_pl_cannon_dam = int(load_file.readline())\r\n            load_file.close()\r\n            \r\n            mapData.pos_x = load_xpos\r\n            mapData.pos_y = load_ypos\r\n            globalStates.current_map = mapData.load_map_list[load_cur_map]\r\n            playerData.HP = load_pl_hp\r\n            playerData.GOLD = load_pl_gold\r\n            playerData.MISSILES = load_pl_missiles\r\n            playerData.MAX_HP = load_pl_max_hp\r\n            playerData.STATS = load_pl_stats\r\n            playerData.REPAIRKITS = load_pl_repairkits\r\n            playerData.CANNON_DAM = load_pl_cannon_dam\r\n            \r\n            print('\\n GAME LOADED')\r\n            getch()\r\n\r\n            globalStates.current_commands = map_commands\r\n            \r\n\r\n        except ValueError:\r\n            clear()\r\n            print('\\n\\n # SAVE FILE CORRUPTED OR EMPTY #')\r\n            print(' # PLEASE SELECT A DIFFERENT FILE #')\r\n            print('\\n [press key]')\r\n            getch()\r\n            \r\n\r\n\r\n\r\n####################################\r\n#####     THE OL GAME LOOP    #####\r\n#################################### \r\n\r\n\r\ndef Main():\r\n\r\n    while True:\r\n\r\n        ## Assign current screen (map, menu, combat) to be printed and modified\r\n        current_screen = globalStates.current_map\r\n        ## Assign uncalled command function for the current map's available inputs\r\n        current_command = globalStates.current_commands\r\n\r\n        clear()\r\n        \r\n        ## Gameplay for menus- static screens with options instead of motion\r\n        if current_screen in globalStates.static_maps:\r\n            if current_screen != staticMaps.menu: \r\n                print(f'\\n HP: {str(playerData.HP)}/{str(playerData.MAX_HP)} G: {str(playerData.GOLD)}')\r\n            for line in current_screen:\r\n                print(line)\r\n        \r\n        ## COMBAT MODE\r\n        elif current_screen in globalStates.combat_maps:\r\n            \r\n            ## While enemy is still alive, keep combat going\r\n            if enemyData.current_enemy.hp > 0:\r\n                ## Print combat map and character data\r\n                print_combat_screen(0)\r\n            \r\n            ## Player death condition ############## dubious at best\r\n            elif playerData.HP <= 0:\r\n                clear()\r\n                print('\\n\\n\\n\\tYOU HAVE DIED\\n\\n\\tsucks to fuckin suck fuckeroni')\r\n                getch()\r\n                sys.exit()\r\n            \r\n            ## Enemy death\r\n            else:\r\n                globalStates.current_map = staticMaps.console_menu\r\n                globalStates.current_commands = console_commands\r\n\r\n                ## random event, 1 in 8 chance each that the player scavenges a free missile or a free repairkit\r\n                win_event = random.randint(1,8)\r\n                \r\n                ## victory message\r\n                print(f'\\n\\n\\t +++VICTORY+++ \\n\\n\\t{enemyData.current_enemy.name} is defeated\\n\\n\\t +{str(globalStates.won_gold)}gp')\r\n                \r\n                ## if win, give shit and whatnot\r\n                if win_event == 2:\r\n                    print('\\n You scavenge +1 missile')\r\n                    playerData.MISSILES += 1\r\n                elif win_event == 8:\r\n                    print('\\n You scavenge +1 repairkit')\r\n                    playerData.REPAIRKITS += 1\r\n\r\n                getch()\r\n                continue\r\n\r\n\r\n        ## GAMEPLAY DYNAMIC MODE\r\n        else:\r\n            ## erase last player location\r\n            overwrite_map(current_screen)\r\n            ## Set current coordinates on the map to pl character\r\n            current_screen[mapData.pos_x][mapData.pos_y] = 'X'\r\n            print_map(current_screen)\r\n\r\n        \r\n        ## Get the ol player input key\r\n        player_input = 
bytes.decode(getch())\r\n\r\n ## Run input through the current commands to get results\r\n returnAction = current_command(player_input, current_screen)\r\n \r\n\r\n\r\n## Assign player commands at runtime\r\nglobalStates.current_commands = map_commands\r\nMain()","repo_name":"orsini1138/AmEn2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":30201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
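save_game() and load_game() in the record above depend on a fixed one-value-per-line order (xpos, ypos, map index, hp, gold, missiles, max_hp, stats, repairkits, cannon_dam). As a sketch with made-up numbers, a hypothetical savegame_demo.txt written by save_game() would therefore contain:

# hypothetical savegame_demo.txt (values invented; order matches the SAVE ORDER comment)
3
5
0
100
25
2
100
10
1
6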
+{"seq_id":"72553883432","text":"import heapq\nimport sys\n\ninput = sys.stdin.readline\n\nV, E = map(int, input().split())\nK = int(input())\ngraph = [[] for _ in range(V + 1)]\n\nINF = int(1e9)\nfor _ in range(E):\n    u, v, w = map(int, input().split())\n    graph[u].append((v,w))\n\n#print(graph)\nD = [INF] * (V + 1)\n\ndef dijkstra(start):\n    q = []\n    heapq.heappush(q, (0,start))\n    D[start] = 0\n\n    while q:\n        dist, now = heapq.heappop(q)\n        if D[now] < dist:\n            continue\n        for i in graph[now]: # i = (neighbor vertex, edge weight)\n            cost = dist + i[1]\n            if cost < D[i[0]]:\n                D[i[0]] = cost\n                heapq.heappush(q, (cost,i[0]))\n\ndijkstra(K)\n\nfor i in range(1, V + 1):\n    if D[i] == INF:\n        print('INF')\n    else:\n        print(D[i])\n\n","repo_name":"jayyeong/Algorithm","sub_path":"Baekjoon/BOJproblem/BOJ1753최단경로_2.py","file_name":"BOJ1753최단경로_2.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
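The BOJ 1753 solution above reads everything from stdin, so a minimal local check looks like this (the graph is made up and 'boj1753.py' stands in for the actual script file; sys.stdin must be swapped before the script body runs, because input is bound to sys.stdin.readline at import time):

# 3 vertices, 3 directed edges, start vertex 1: 1->2 (w=2), 1->3 (w=5), 2->3 (w=1)
import io, sys
sys.stdin = io.StringIO('3 3\n1\n1 2 2\n1 3 5\n2 3 1\n')
exec(open('boj1753.py').read())  # prints 0, 2, 3 (vertex 3 is reached via 2: 2+1 < 5)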
+{"seq_id":"455550629","text":"# coding: utf8\n\ndef _get_thread_urls(thread):\n    \"\"\"Call the thread endpoint with different parameter combinations\"\"\"\n    return [\n        lambda: f'v3/thread/?uid={thread.uid}',\n        lambda: f'v3/thread/?uid={thread.uid}&date={thread.start_date}',\n        lambda: f'v3/thread/?uid={thread.uid}&from={thread.station_from}&to={thread.station_to}',\n        lambda: f'v3/thread/?uid={thread.uid}&from={thread.station_from}&to={thread.station_to}'\n                f'&date={thread.start_date}',\n        lambda: f'v3/thread/?uid={thread.uid}&from={thread.station_from}&to={thread.station_to}'\n                f'&date={thread.start_date}&result_timezone=Europe%2FLondon',\n    ]\n\n\nclass ThreadParams(object):\n    \"\"\"A thread found in search results, used later to call the thread endpoint\"\"\"\n    def set_params(self, uid, station_from, station_to, start_date):\n        self.uid = uid\n        self.station_from = station_from\n        self.station_to = station_to\n        self.start_date = start_date\n\n\nclass SetThread(object):\n    def __init__(self, thread):\n        self.thread = thread\n\n    def __call__(self, checker, response):\n        segment = response.json()['segments'][0]\n\n        self.thread.set_params(\n            uid=segment['thread']['uid'],\n            station_from=segment['from']['code'],\n            station_to=segment['to']['code'],\n            start_date=segment['start_date']\n        )\n\n\ndef check_threads(search_url, search_url_params):\n    \"\"\"\n    Run the thread tests\n    :param search_url: the search whose results the thread is picked from\n    :param search_url_params: parameters for the search call\n    \"\"\"\n    thread = ThreadParams()\n    url_params = search_url_params.copy()\n    url_params['processes'] = [SetThread(thread)]\n    thread_urls = _get_thread_urls(thread)\n\n    return [\n        search_url,\n        url_params,\n        thread_urls\n    ]\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/smoke_tests/smoke_tests/config/api_public/content_checkers.py","file_name":"content_checkers.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"2821118","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nr'''\n # Web development - WSGI interface: the WSGI handler function\n # Use case:\n The right approach is to let dedicated server software implement the low-level plumbing while we use Python to focus on generating the HTML document. Since we do not want to touch TCP connections or raw HTTP request and response formats, we need a unified interface that lets us concentrate on writing the web logic in Python.\n That interface is WSGI: the Web Server Gateway Interface.\n The WSGI interface definition is very simple: it only asks the web developer to implement one function in order to respond to HTTP requests. Here is the simplest possible web version of \"Hello, web!\":\n def application(environ, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n return [b'<h1>Hello, web!</h1>'] \n The application() function above is an HTTP handler that conforms to the WSGI standard. It takes two arguments:\n 1. environ: a dict object holding all the HTTP request information;\n 2. start_response: a function that sends the HTTP response. \n Inside application(), the call:\n start_response('200 OK', [('Content-Type', 'text/html')])\n sends the headers of the HTTP response. Note the headers can only be sent once, i.e. start_response() may only be called once. start_response() takes two arguments: an HTTP status code and a list of HTTP headers, where each header is a tuple of two strs.\n Normally the Content-Type header should always be sent to the browser, and many other common HTTP headers should be sent as well.\n Then the function's return value b'<h1>Hello, web!</h1>' is sent to the browser as the body of the HTTP response.\n With WSGI, all we care about is how to take the HTTP request information out of the environ dict, build the HTML, send the headers via start_response(), and finally return the body.\n The application() function itself contains no HTTP parsing whatsoever; in other words, the low-level code is not ours to write, and we are only responsible for deciding, at a higher level, how to respond to a request.\n But wait, how does this application() function get called? If we called it ourselves we could not supply the two arguments environ and start_response, and the returned bytes could not be delivered to a browser.\n So application() must be called by a WSGI server. There are many servers that comply with the WSGI spec and we can pick any of them, but right now we just want to verify quickly that our application() function really can push HTML to a browser, so we want the simplest WSGI server available to get the web application running.\n The good news is that Python ships with a WSGI server in the module wsgiref, a reference implementation of a WSGI server written in pure Python. \"Reference implementation\" means it fully complies with the WSGI standard but ignores all runtime efficiency; it is meant for development and testing only. \n\n'''\n# First we write *_hello*.py, implementing the WSGI handler function of the web application:\n\ndef application(environ, start_response):\n    start_response('200 OK', [('Content-Type', 'text/html')])\n    path = environ['PATH_INFO'].encode('iso-8859-1').decode('utf-8')[1:]\n    # print('path =', path)\n    body = '<h1>Hello, %s!</h1>' % (path or 'web')  # decodes the path so Chinese input works; the plain version cannot take Chinese: (environ['PATH_INFO'][1:] or 'web')\n    # print('body =', body)\n    return [body.encode('utf-8')]  # for Chinese output switch to: [body.encode('gbk')]\n    # return [b'<h1>Hello, Web!</h1>']\n\nr'''\n # Note: next, write a *_server*.py responsible for starting the WSGI server and loading the application() function;\n If this web app feels too simple, you can rework it slightly and read PATH_INFO from environ to show more dynamic content:\n # *_hello*.py can be reworked as follows\n def application(environ, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n body = '<h1>Hello, %s!</h1>' % (environ['PATH_INFO'][1:] or 'web')\n return [body.encode('utf-8')]\n\n'''","repo_name":"CowryGolden/PythonTest","sub_path":"test/web_dev_wsgi_application_hello_test1.py","file_name":"web_dev_wsgi_application_hello_test1.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
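The note in the record above leaves the companion *_server*.py to the reader. A minimal sketch using the wsgiref reference server the docstring describes (the imported module name matches this file; port 8000 is an arbitrary choice):

# *_server*.py - start the WSGI reference server and hand it application()
from wsgiref.simple_server import make_server
from web_dev_wsgi_application_hello_test1 import application  # the handler above

httpd = make_server('', 8000, application)
print('Serving HTTP on port 8000...')
httpd.serve_forever()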
+{"seq_id":"30340051829","text":"# handles interaction between user_main.py, player.py, prim.py and kruskal.py\r\n# also handles drawing the maze boards\r\n\r\nfrom player import *\r\nfrom random import *\r\nfrom prim import *\r\nfrom kruskal import *\r\nfrom math import *\r\n\r\nclass Maze(object):\r\n puzzles = [\"kruskal\", \"prim\"]\r\n\r\n def initMaze(rows, cols, value=None):\r\n # returns 2D Maze Board \r\n # copied from https://www.cs.cmu.edu/~112/notes/notes-2d-lists.html\r\n a = []\r\n for row in range(rows):\r\n for col in range(cols):\r\n a += [ [value] * cols]\r\n return a\r\n\r\n def __init__(self, rows, cols, cellSize, width, height, puzzle=\"\"):\r\n # initializes a type of puzzle randomly only for debugging purposes\r\n self.rows = rows\r\n self.cols = cols\r\n self.board = Maze.initMaze(rows, cols)\r\n self.cellSize = cellSize\r\n self.width = width\r\n self.height = height\r\n self.player = Player(cellSize)\r\n self.walls = None\r\n if puzzle == \"\":\r\n puzzle = choice(Maze.puzzles)\r\n if puzzle == \"kruskal\":\r\n self.walls = Kruskal(rows, cols, cellSize, self.board)\r\n else:\r\n self.walls = Prim(rows, cols, cellSize, self.board)\r\n\r\n def draw(self, canvas, exitImg):\r\n # main function which calls another drawing function\r\n canvas.create_rectangle(0, 0, self.width, self.height, \r\n fill=\"mint cream\")\r\n self.drawMaze(canvas, exitImg)\r\n\r\n def drawMaze(self, canvas, exitImg):\r\n # draws the maze and the player\r\n walls = self.walls\r\n for wall in walls:\r\n (x0, y0, x1, y1) = getValues(wall, self.cellSize)\r\n canvas.create_line(x0, y0, x1, y1, width=2, fill=\"black\")\r\n self.player.draw(canvas)\r\n lx = (self.cols - 1) * self.cellSize + self.cellSize/2\r\n ly = (self.rows - 1) * self.cellSize + self.cellSize/2\r\n canvas.create_image(lx, ly, image=exitImg)\r\n\r\n def checkBounds(self, x, y):\r\n # checks for collisions of player with walls or going off the board\r\n if (((x - self.player.radius < 0) or \r\n (x + self.player.radius > self.width)) or \r\n ((y - self.player.radius < 0) or \r\n (y + self.player.radius > self.height))):\r\n # off the board check\r\n return False\r\n for wall in self.walls:\r\n ((frow, fcol), (srow, scol)) = wall\r\n (x0, y0, x1, y1) = getValues(wall, self.cellSize)\r\n if frow == srow:\r\n # horizontal wall check\r\n diff = abs(y - y0)\r\n if (diff < self.player.radius and \r\n (x0 <= x-self.player.radius <= x1 or \r\n x0 <= x+self.player.radius <= x1)):\r\n return False\r\n if fcol == scol:\r\n # vertical wall check\r\n diff = abs(x - x0)\r\n if (diff < self.player.radius and \r\n (y0 <= y-self.player.radius <= y1 or \r\n y0 <= y+self.player.radius <= y1)):\r\n return False\r\n return True\r\n\r\n def onKeyPressed(self, direction):\r\n # controls the movement of Player \r\n # sends updated values to check if within bounds, and then makes move\r\n x, y = self.player.x, self.player.y\r\n if direction == \"Up\" and self.checkBounds(x, y-self.player.speed):\r\n self.player.moveUp()\r\n elif direction == \"Down\" and self.checkBounds(x, y+self.player.speed):\r\n self.player.moveDown()\r\n elif direction == \"Right\" and self.checkBounds(x+self.player.speed, y):\r\n self.player.moveRight()\r\n elif direction == \"Left\" and self.checkBounds(x-self.player.speed, y):\r\n self.player.moveLeft()\r\n if ((self.player.x+self.player.radius>=(self.cols-1)*self.cellSize+self.cellSize/2)\r\n and (self.player.y+self.player.radius>=(self.rows-1)*self.cellSize+self.cellSize/2)):\r\n # checks if game is won\r\n return True\r\n\r\ndef 
getValues(wall, cellSize):\r\n # finds the pixel coordinates of wall\r\n ((frow, fcol), (srow, scol)) = wall\r\n x0 = fcol * cellSize\r\n y0 = frow * cellSize\r\n x1 = scol * cellSize\r\n y1 = srow * cellSize\r\n return (x0, y0, x1, y1)","repo_name":"sbhotika/15112-term-project","sub_path":"Maze.py","file_name":"Maze.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"37295802809","text":"import math\n\nimport pymunk\nfrom pymunk.pyglet_util import DrawOptions\nfrom pymunk.vec2d import Vec2d\n\nimport pyglet\n\nwindow = pyglet.window.Window(900, 600, \"Rocketry\", resizable=False)\noptions = DrawOptions()\n\nspace = pymunk.Space()\nspace.gravity = 0, 0\n\ndef centroid(vertices, weights=None):\n num = len(vertices)\n if weights is None:\n weights = [1 / num] * num\n \n xsum = 0\n ysum = 0\n for i, v in enumerate(vertices):\n xsum += v[0] * weights[i]\n ysum += v[1] * weights[i]\n \n return (xsum, ysum)\n\n\nclass Part:\n STACKED = 0\n RADIAL = 1\n\n def __init__(self):\n self.vertices = [(0, 0)]\n\n self.mount = Part.STACKED\n\n self.mass = 0\n self.radial_size = 0 # only applies to circular parts\n self.height = 0\n\n self.impact_tolerance = 0 # mps\n self.heat_tolerance = 0\n\n def move_to(self, x, y):\n self.vertices = [(v[0] + x, v[1] + y) for v in self.vertices]\n\n\nclass Engine(Part):\n def __init__(self):\n super().__init__()\n\n self.radial_size = 90\n self.height = 180\n\n self.mass = 150\n self.burn = 8.8\n self.burning = False\n self.atm_thrust = 162.91\n self.vac_thrust = 192.0\n\n def engage(self):\n self.burning = True\n\n def get_impulse(self, dt):\n if self.burning:\n self.burn -= dt\n bottom_left = self.vertices[0]\n impulse_x = bottom_left[0] + self.radial_size // 2\n impulse_y = bottom_left[1] - self.height // 2\n return (impulse_x, impulse_y, self.atm_thrust)\n else:\n return (0, 0, 0)\n\n\nclass Rocket:\n def __init__(self):\n self.parts = []\n\n def add_part(self, part):\n self.parts.append(part)\n\n def get_body_and_shape(self):\n # compile part data\n vertices = []\n mass = 0\n centroids_map = {}\n for p in self.parts:\n vertices += p.vertices\n mass += p.mass\n centroids_map[centroid(p.vertices)] = p.mass\n\n # calculate center of mass\n weighted_centroids_map = {c: m / mass for c, m in centroids_map.items()}\n centroids = list(weighted_centroids_map.keys())\n weights = [weighted_centroids_map[c] for c in centroids]\n com = centroid(centroids, weights)\n\n # calculate offset\n vertices = list(set(vertices))\n center = centroid(vertices)\n offset = Vec2d(center) - Vec2d(com)\n\n # create body\n body = pymunk.Body(mass)\n shape = pymunk.Poly(body, vertices)\n body.moment = pymunk.moment_for_poly(mass, vertices)\n body.center_of_gravity = com\n body.position = 450, 300\n\n return body, shape, body.local_to_world(com)\n\npod = Part()\npod.vertices = [(0, 0), (45, 90), (90, 0)]\npod.mass = 10\n\nengine = Engine()\nengine.vertices = [(0, 0), (0, -engine.height), (engine.radial_size, -engine.height), (engine.radial_size, 0)]\nengine.engage()\n\nrocket = Rocket()\nrocket.parts = [pod, engine]\n\nbody, shape, com = rocket.get_body_and_shape()\nspace.add(body, shape)\n\nlabel = pyglet.text.Label(\"Center Of Mass\",\n font_size=8,\n x=com[0], y=com[1],\n anchor_x=\"center\", anchor_y=\"center\")\nx, y = body.local_to_world((45, 90))\nlabel2 = pyglet.text.Label(\"(45, 90)\",\n font_size=8,\n x=x, y=y,\n anchor_x=\"center\", anchor_y=\"center\")\n \n\n@window.event\ndef on_draw():\n window.clear()\n space.debug_draw(options)\n label.draw()\n label2.draw()\n\ndef update(dt):\n global label2\n\n impulse_x, impulse_y, impulse_amount = engine.get_impulse(dt)\n x, y = body.local_to_world((impulse_x, impulse_y))\n label2 = pyglet.text.Label(\"Impulse\",\n font_size=8,\n x=x, y=y,\n anchor_x=\"center\", anchor_y=\"center\")\n\n impulse_x, impulse_y, impulse_amount = engine.get_impulse(dt)\n body.apply_impulse_at_local_point((0, 
impulse_amount), (impulse_x, impulse_y))\n space.step(dt)\n\nif __name__ == \"__main__\":\n pyglet.clock.schedule_interval(update, 1.0 / 60.0)\n pyglet.app.run()\n","repo_name":"henrymwestfall/Pyglet-Pymunk-Test","sub_path":"rocket.py","file_name":"rocket.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
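The weighted centroid() in the rocket record above is what get_body_and_shape() leans on for the center of mass; a quick hand check with made-up values:

# the right-hand point carries 75% of the weight, so the result shifts toward it:
# centroid([(0, 0), (2, 0)], weights=[0.25, 0.75]) -> xsum = 0*0.25 + 2*0.75 = 1.5 -> (1.5, 0)
# with weights omitted it falls back to the unweighted mean: (1.0, 0)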
+{"seq_id":"23070082037","text":"def bin_rota(arr):\n resp = []\n for i,v in enumerate(arr):\n if i%2==0: resp+=v\n else: resp+=list(reversed(v))\n return resp\n\n\ndef bin_rota_up(arr):\n return [name for i, row in enumerate(arr) for name in row[::-1 if i%2 else 1]]\n\n\n\nprint(bin_rota([[\"Stefan\", \"Raj\", \"Marie\"],\n [\"Alexa\", \"Amy\", \"Edward\"],\n [\"Liz\", \"Claire\", \"Juan\"],\n [\"Dee\", \"Luke\", \"Katie\"]]))\nprint(bin_rota_up([[\"Stefan\", \"Raj\", \"Marie\"],\n [\"Alexa\", \"Amy\", \"Edward\"],\n [\"Liz\", \"Claire\", \"Juan\"],\n [\"Dee\", \"Luke\", \"Katie\"]]))\n\n\n","repo_name":"igruiz91/Codewars-HackerRank-LeetCode-CoderBite-freeCodeCamp","sub_path":"Codewars/Python/7 kyu/II/The Lazy Startup Office.py","file_name":"The Lazy Startup Office.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"2438640237","text":"\"\"\"\nHomework 1\nBuilding on Hero, add a Mage hero with the following extras\n\nMage\n    New attribute: mana (magical, default 0, max 100)\n\n    - Attack:\n        each call: rage +2\n        each call: mana +5\n\n    - Ultimate\n        cast the ultimate automatically when mana is full\n\n    - Second form\n        when rage is full, switch to the second form automatically (max mana becomes 50)\n\"\"\"\n\n\nclass Hero:\n    def __init__(self, name, weapon, equipment, power, blood, anger):\n        self.name = name\n        self.weapon = weapon\n        self.equipment = equipment\n        self.power = power\n        self.blood = blood\n        self.anger = anger\n\n    def attack(self):\n        print(f'{self.name} attacks!')\n        self.anger += 2\n\n        if self.anger == 100:  # rage is full\n            self.big_data()\n\n    def big_data(self):\n        print(f'{self.name} casts the ultimate!')\n        self.anger = 0\n\n\nclass Mage(Hero):\n    def __init__(self, name, weapon, equipment, power, blood, anger):\n        super().__init__(name, weapon, equipment, power, blood, anger)\n        self.magical = 0\n\n    def second_form(self):\n        print(f'{self.name} transforms into the second form')\n        if self.magical >= 50:\n            self.big_data()\n        self.anger = 0\n        print(f'{self.name} reverts to the original form')\n\n    def big_data(self):\n        print(f'{self.name} casts the ultimate!')\n        self.magical = 0\n\n    def attack(self):  # !!!! the main entry point\n        print(f'{self.name} attacks!')\n        self.magical += 5\n        self.anger += 2\n\n        if self.magical == 100:\n            self.big_data()\n\n        elif self.anger == 100:\n            self.second_form()\n\n\nmage1 = Mage('Badass Mage', 'Magic Wand', 'Broom', 0, 100, 78)\nfor i in range(100):\n    mage1.attack()\n    print(mage1.magical)\n    print(mage1.anger)\n\n'''\n!!!!!!!!!!!!!!!!!!!! The idea !!!!!!!!!!!!!!!!!!!!\nWhat is the second form for --> it sets max mana to 50: with 50 mana the ultimate can be cast --> so in second_form(), if mana is at least 50 when transforming, cast the ultimate. Naturally, below 50 mana no ultimate fires; afterwards rage resets to 0. Since the assignment says the second form triggers automatically at full rage, whether the ultimate actually fires is partly luck - mana may not have reached 50 by the time the transform happens.\n'''\n","repo_name":"lll13508510371/hexin","sub_path":"核心/10 类与继承/03 课后作业/0015-10-00000001-对象继承.py","file_name":"0015-10-00000001-对象继承.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3436000870","text":"import heapq\nfrom typing import List\n\n\nclass Solution:\n    def constrainedSubsetSum(self, nums: List[int], k: int) -> int:\n        l = len(nums)\n        first_pos = l\n        last_pos = -1\n        for i,v in enumerate(nums):\n            if v > 0:\n                first_pos = min(first_pos, i)\n                last_pos = max(last_pos, i)\n        if last_pos == -1:\n            return max(nums)\n\n\n        l = len(nums)\n        q = [(-nums[first_pos], first_pos)]\n        for i in range(first_pos+1, last_pos+1):\n            while q and q[0][1] + k < i:\n                heapq.heappop(q)\n\n            waspos = nums[i] >= 0\n\n            nums[i] += max(0, -q[0][0])\n            \n            if waspos:\n                q = []\n            \n            heapq.heappush(q, (-nums[i], i))\n            #print(nums)\n        return max(nums)\n","repo_name":"jlcarr/LeetCode","sub_path":"Problem_1425/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
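A quick check of the heap-plus-window solution above against LeetCode 1425's first example: the best constrained subsequence is 10, 2, 5, 20 (consecutive chosen indices never more than k apart), which sums to 37:

print(Solution().constrainedSubsetSum([10, 2, -10, 5, 20], 2))  # 37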
+{"seq_id":"69972747432","text":"import pymongo\nfrom flask import Flask, render_template, url_for, request, flash, redirect, jsonify, send_file, session\nfrom flask_cors import CORS, cross_origin\nfrom bson.json_util import dumps\nimport json\napp = Flask(__name__)\nfrom update_route import *\nmongo = pymongo.MongoClient(host=\"localhost\",port=27017)\ndb=mongo.algoinvstr\n\n@app.route('/')\ndef index():\n niftyv = db.nifty.find()\n companylist = db.toptwenty.find()\n return render_template('index.html',nifty=niftyv[0][\"cur_val\"],nifty_high=niftyv[0][\"all_time_high\"],change_per=niftyv[0][\"down_per\"],mylist=companylist,time=niftyv[0][\"time\"],date=niftyv[0][\"date\"])\n\n@app.route('/update')\ndef call():\n updatecalled()\n niftyv = db.nifty.find()\n companylist = db.toptwenty.find()\n return render_template('index.html',nifty=niftyv[0][\"cur_val\"],nifty_high=niftyv[0][\"all_time_high\"],change_per=niftyv[0][\"down_per\"],mylist=companylist,time=niftyv[0][\"time\"],date=niftyv[0][\"date\"])\n\n\nif __name__ == \"__main__\":\n print('started')\n app.run(debug=True)","repo_name":"Shrirampareek888/Algorithmic-Investor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"35399565223","text":"import pandas as pd\nimport re\nfrom nltk.corpus import stopwords\nimport redditcleaner\n\n\ndef cleancsv(input_filepath, output_filepath):\n df = pd.read_csv(input_filepath, encoding='utf-8')\n\n #filtering out entries that have no text\n df = df[df['selftext'].notnull()]\n\n #filtering out entries that have no title\n df = df[df['title'].notnull()]\n\n #converting to time stamp and dropping unnecessary columns\n df['time_created'] = pd.to_datetime(df['created_utc'], unit='s')\n df = df.drop(columns = ['url', 'created_utc', 'created'])\n\n #removing rows that are deleted, removed, and blank comments\n df = df.loc[df['selftext'] != '[deleted]']\n df = df.loc[df['selftext'] != '[removed]']\n df = df.loc[df['selftext'] != '']\n\n #removing rows that are deleted, removed, and blank titles\n df = df.loc[df['title'] != '[deleted]']\n df = df.loc[df['title'] != '[removed]']\n df = df.loc[df['title'] != '']\n\n #processing text\n df['processed_text'] = df['selftext'].map(redditcleaner.clean)\n\n #removing blank comments\n df = df.loc[df['processed_text'] != '']\n\n #removing puncutation\n df['processed_text'] = df['processed_text'].map(lambda x: re.sub('[,;\\!?]', '', x))\n\n #removing any missed urls\n df['processed_text'] = df['processed_text'].map(lambda x: re.sub(r'(?:(?:http|https):\\/\\/)?([-a-zA-Z0-9.]{2,256}\\.[a-z]{2,4})\\b(?:\\/[-a-zA-Z0-9@:%_\\+.~#?&//=]*)?',\"\",x,flags=re.MULTILINE))\n \n #removing puncutation\n df['processed_text'] = df['processed_text'].map(lambda x: re.sub('[,;\\.!?]', '', x))\n\n #removing parentheses\n df['processed_text'] = df['processed_text'].map(lambda x: re.sub('[()]' ,'', x))\n\n #fixing apostrophes\n #df['processed_text'] = df['processed_text'].replace({\"’\" : \"'\"}, regex=True)\n\n #lowercasing all the words\n df['processed_text'] = df['processed_text'].map(lambda x: x.lower())\n\n #removing stopwords\n stop = stopwords.words('english')\n df['processed_text'] = df['processed_text'].apply(lambda x: ' '.join([item for item in str.split(x) if item not in stop]))\n\n #removing abbreviations\n df['processed_text'] = df['processed_text'].map(lambda x: re.sub('y/o' ,'year old', x))\n\n #removing posts that have any NAs or blank commends\n df = df.loc[df['processed_text'] != '']\n df = df.dropna()\n\n #---------------------------------------------------\n #---------------------------------------------------\n #now doing the same process for titles \n\n #processing text\n df['processed_title'] = df['title'].map(redditcleaner.clean)\n\n #removing blank comments\n df = df.loc[df['processed_title'] != '']\n\n #removing puncutation\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub('[,;\\!?]', '', x))\n\n #removing any missed urls\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub(r'(?:(?:http|https):\\/\\/)?([-a-zA-Z0-9.]{2,256}\\.[a-z]{2,4})\\b(?:\\/[-a-zA-Z0-9@:%_\\+.~#?&//=]*)?',\"\",x,flags=re.MULTILINE))\n \n #removing puncutation\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub('[,;\\.!?]', '', x))\n\n #lowercasing all the words\n df['processed_title'] = df['processed_title'].map(lambda x: x.lower())\n\n #removing parentheses and brackets\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub('[()]' ,'', x))\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub('[\\[\\]]' ,'', x))\n\n #removing abbreviations\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub('y/o' ,'year old', x))\n\n #removing posts that have any NAs or 
blank comments\n    df = df.loc[df['processed_title'] != '']\n    df = df.dropna()\n\n    #removing stopwords\n    stop = stopwords.words('english')\n    df['processed_title_no_stop'] = df['processed_title'].apply(lambda x: ' '.join([item for item in str.split(x) if item not in stop]))\n\n    #printing out the first 5 rows \n    print(df.head())\n\n    #printing to csv\n    df.to_csv(output_filepath, index=False, encoding='utf-8-sig')\n\n\ndef main(input_filepath, output_filepath):\n    \"\"\" \n    Runs data processing scripts to turn raw data from (../raw) into\n    cleaned data ready to be analyzed (saved in ../processed).\n    \"\"\"\n    cleancsv(input_filepath, output_filepath)\n\nif __name__ == '__main__':\n    input_file = '../../data/raw/raw_reddit_scrape_3.csv'\n    output_file = '../../data/processed/cleaned_reddit_data_3.csv'\n\n    main(input_file, output_file)\n","repo_name":"vmmadathil/forums_aca_needs","sub_path":"src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"73285344233","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom PySide6.QtWidgets import QDialog, QLabel\n\nfrom .license_dialog_ui import Ui_LicenseDialog\n\n\nsoftwares = [\n ('PySide6', '6.5.1', 'https://pypi.org/project/PySide6/', 'Commercial, GPLv2, LGPLv3', 'https://pypi.org/project/PySide6/'),\n # ('Paraview', '', 'https://www.paraview.org/', 'permissive BSD', 'https://www.paraview.org/license/'),\n ('ionicons', '', 'https://ionic.io/ionicons', 'Completely open source, MIT licensed', 'https://ionic.io/ionicons'),\n ('PyFoam', '2022.9', 'https://pypi.org/project/PyFoam', 'GPLv2+', 'https://pypi.org/project/PyFoam/'),\n ('h5py', '3.9.0', 'https://docs.h5py.org/en/stable/', 'h5py', 'https://docs.h5py.org/en/stable/licenses.html'),\n ('qasync', '0.24.0', 'https://pypi.org/project/qasync/', 'BSD', 'https://pypi.org/project/qasync/'),\n ('psutil', '5.9.5', 'https://pypi.org/project/psutil/', 'BSD', 'https://pypi.org/project/psutil/'),\n]\n\nclass LicenseDialog(QDialog):\n def __init__(self, widget):\n super().__init__(widget)\n self._ui = Ui_LicenseDialog()\n self._ui.setupUi(self)\n\n layout = self._ui.licenses.layout()\n row = 1\n for software, version, url, licence, licneceUrl in softwares:\n softwareLink = QLabel(f'{software}')\n softwareLink.setOpenExternalLinks(True)\n layout.addWidget(softwareLink, row, 0)\n licenceLink = QLabel(f'{licence}')\n licenceLink.setOpenExternalLinks(True)\n layout.addWidget(licenceLink, row, 1)\n row += 1\n\n self._connectSignalsSlots()\n\n def _connectSignalsSlots(self):\n self._ui.close.clicked.connect(self.close)\n","repo_name":"nextfoam/baram","sub_path":"baramMesh/view/menu/help/license_dialog.py","file_name":"license_dialog.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"}
+{"seq_id":"70633343914","text":"from typing import Dict, List\n\nfrom canvas_workflow_kit import events\nfrom canvas_workflow_kit.canvas_code_set import CanvasCodeSet\nfrom canvas_workflow_kit.intervention import Intervention\nfrom canvas_workflow_kit.patient_recordset import InterviewRecordSet\nfrom canvas_workflow_kit.protocol import STATUS_DUE, STATUS_SATISFIED, ClinicalQualityMeasure, ProtocolResult\nfrom canvas_workflow_kit.recommendation import (\n Recommendation, ImmunizationRecommendation\n)\nfrom canvas_workflow_kit.timeframe import Timeframe\nfrom canvas_workflow_kit.value_set.specials import (\n Covid19QuestionnaireHighRiskOutreach,\n Covid19QuestionnaireSymptomaticSurveillance\n)\n\n\nfrom canvas_workflow_kit.value_set.v2018 import (\n InfluenzaVaccine_1254\n)\n\n# flake8: noqa\n\n\nclass HyperlinkRecommendation(ClinicalQualityMeasure):\n\n class Meta:\n title = 'Hyperlink Recommendation'\n version = \"1.2\"\n changelog = \"Initial release\"\n\n description = 'All patients with COVID Questionnaire completed Date < 7 days ago and > 5 days ago.'\n information = 'https://canvas-medical.zendesk.com/hc/en-us/articles/360059084173-COVID-19-Risk-Assessment-Follow-Up-Protocol'\n\n identifiers = ['CCP001v1']\n\n types = ['CCP']\n\n responds_to_event_types = [\n events.HEALTH_MAINTENANCE,\n ]\n authors = [\n 'Canvas Medical Team',\n ]\n\n compute_on_change_types = [\n ClinicalQualityMeasure.CHANGE_INTERVIEW,\n ]\n\n def compute_results(self) -> ProtocolResult:\n result = ProtocolResult()\n\n result.add_recommendation(\n Intervention(\n title='Link Rec Title',\n narrative=f'Link Rec Narr',\n href='http://canvasmedical.com'\n )\n )\n result.add_recommendation(\n ImmunizationRecommendation(\n key='KEY-ID',\n rank=123,\n button='ACT',\n patient=self.patient,\n immunization=InfluenzaVaccine_1254)\n )\n\n return result\n","repo_name":"dhes/canvas-workflow-kit-0.6.8","sub_path":"canvas_workflow_kit/builtin_cqms/hyperlink_recommendation.py","file_name":"hyperlink_recommendation.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36261941750","text":"import gzip\nimport base64\nfrom lxml import etree\nfrom cardmarket_api.call import api_request\nfrom cardmarket_api.utils import dict_to_xml\n\n\nclass CardMarketSession:\n \"\"\"Create a session for specified account\"\"\"\n\n # 5000 requests per 24 hours max\n request_count = 0\n expires = None\n\n def __init__(self, mkm_app_token, mkm_app_secret, mkm_access_token, mkm_token_secret):\n self.credentials = {\"mkm_app_token\": mkm_app_token,\n \"mkm_app_secret\": mkm_app_secret,\n \"mkm_access_token\": mkm_access_token,\n \"mkm_token_secret\": mkm_token_secret}\n\n @api_request\n def get_metaproduct_info(self, metaproduct_id):\n \"\"\"Return info for metaproduct specified by id\"\"\"\n endpoint = \"/metaproduct/{0}\".format(metaproduct_id)\n data_type = \"metaproduct\"\n return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n @api_request\n def get_product_info(self, product_id):\n \"\"\"Return info for product specified by id\"\"\"\n endpoint = \"/products/{0}\".format(product_id)\n data_type = \"product\"\n return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n @api_request\n def get_products_by_name(self, card_name, exact=False, id_language=1):\n \"\"\"Return a list of product from card_name either in English or French.\n card_name has to be an exact match\n English idLanguage = 1\n French idLanguage = 2\"\"\"\n endpoint = \"/products/find\"\n parameters = {\"search\": card_name, \"exact\": exact, \"idGame\": 1, \"idLanguage\": id_language}\n return {\"endpoint\": endpoint, \"data_type\": \"product\", \"parameters\": parameters}\n\n @api_request\n def get_articles_for_sale(self, product_id):\n \"\"\"Return list of all articles for sale for a specified article\"\"\"\n endpoint = \"/articles/{0}\".format(product_id)\n data_type = \"article\"\n return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n def get_all_products(self):\n \"\"\"Return binary list of all cardMarket products. 
Open the output file with mode 'wb' to write it to disk\"\"\"\n        gzip_file = self.get_all_products_file()\n        data = gzip.decompress(base64.b64decode(gzip_file))\n        return data\n\n    @api_request\n    def get_all_products_file(self):\n        \"\"\"Return gzip file with all cardMarket products\"\"\"\n        endpoint = \"/productlist\"\n        data_type = \"productsfile\"\n        return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n    @api_request\n    def get_all_expansions(self):\n        \"\"\"Return list of all expansions\"\"\"\n        endpoint = \"/games/1/expansions\"\n        return {\"endpoint\": endpoint}\n\n    @api_request\n    def get_wantlists(self):\n        \"\"\"Return list of all wantLists of the account\"\"\"\n        endpoint = \"/wantslist\"\n        data_type = \"wantslist\"\n        return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n    @api_request\n    def get_cards_from_wantlist(self, id):\n        \"\"\"Return list of cards from specified wantList by id\"\"\"\n        endpoint = \"/wantslist/{0}\".format(id)\n        data_type = \"want\"\n        return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n    @api_request\n    def get_shopping_cart(self):\n        \"\"\"Return dict with shippingAddress, shoppingCart and account\"\"\"\n        endpoint = \"/shoppingcart\"\n        return {\"endpoint\": endpoint}\n\n    @staticmethod\n    def construct_xml(dict_list):\n        \"\"\"Return MKM API valid binary string XML from a list of dict\n        For empty SubElement set value to None type\"\"\"\n        xml_tree = etree.Element(\"request\")\n        [dict_to_xml(xml_tree, d) for d in dict_list]\n        return etree.tostring(xml_tree, encoding='UTF-8', xml_declaration=True)\n\n\nif __name__ == \"__main__\":\n    # Tests (placeholder credentials - substitute real MKM tokens before running):\n    cm = CardMarketSession(\"APP_TOKEN\", \"APP_SECRET\", \"ACCESS_TOKEN\", \"TOKEN_SECRET\")\n    print(cm.get_products_by_name(\"giant spider\"))\n    print(cm.request_count)\n","repo_name":"NicolasCapon/cardmarket_api","sub_path":"cardmarket_api/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"2767885388","text":"'''\ninputs: month, day, Daily_Temp, Daily_Precip, Daily_Humidity, Daily_Pressure, Daily_WindDir,\n Daily_WindSpeed, Daily_DNI, Daily_DHI\n\noutput: Daily_radiation\n'''\n\n# import modules\nfrom flask import Flask, jsonify, request\nimport pandas as pd\nimport joblib\nfrom app.preprocessing_functions import log_transform\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef get_input():\n '''\n Flask script to interface between user request and ml model selected during POC \n '''\n # load packets\n packet = request.get_json(force=True)\n print(packet)\n\n # extract and reshape input data\n #input_data = list(packet.values())\n\n #print(input_data)\n\n # reshape data\n data = pd.DataFrame(packet, index=[0])\n\n print(data)\n\n # load the ml model\n model_path = 'app/rf_model.joblib'\n model = joblib.load(model_path)\n\n # generate prediction\n solar_irr = model.predict(data)[0]\n\n return jsonify(packet, {'Solar irradiation':solar_irr})","repo_name":"max-lutz/ML-tools-and-algorithms","sub_path":"MLOps/MLOps_course_Omdena/Solar_project/solar_app/app/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"184196879","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner # noqa\n\nfrom core.testcase import TestCase, main\nfrom core.types import HyperCategory, Offer, Shop\nfrom core.matcher import Absent\n\n\nclass T(TestCase):\n @classmethod\n def prepare_pricedrops(cls):\n cls.index.shops += [\n Shop(fesh=20267000),\n Shop(fesh=20267001),\n Shop(fesh=20267002),\n Shop(fesh=20267003),\n Shop(fesh=20267004),\n ]\n\n cls.index.hypertree += [\n HyperCategory(hid=20267000),\n ]\n\n cls.index.offers += [\n Offer(title=\"offer1\", fesh=20267000, enable_auto_discounts=1, price=100000, price_history=None),\n Offer(\n title=\"offer2\",\n fesh=20267001,\n enable_auto_discounts=1,\n price=100000,\n price_old=150000,\n price_history=None,\n ),\n Offer(title=\"offer3\", fesh=20267002, enable_auto_discounts=1, price=100000, price_history=120000),\n Offer(\n title=\"offer4\",\n fesh=20267003,\n enable_auto_discounts=1,\n price=100000,\n price_old=150000,\n price_history=120000,\n ),\n Offer(title=\"offer5\", fesh=20267004, enable_auto_discounts=1, price=150000, price_history=120000),\n ]\n\n def test_autosale_no_disount_if_nothing_old(self):\n response = self.report.request_json('place=prime&fesh=20267000')\n self.assertFragmentIn(\n response,\n {\n \"titles\": {\"raw\": \"offer1\"},\n \"prices\": {\"value\": \"100000\", \"discount\": Absent()},\n },\n )\n\n def test_autosale_disount_if_only_history(self):\n response = self.report.request_json('place=prime&fesh=20267002')\n self.assertFragmentIn(\n response, {\"titles\": {\"raw\": \"offer3\"}, \"prices\": {\"value\": \"100000\", \"discount\": {\"percent\": 17}}}\n )\n\n def test_autosale_disount_if_oldprice_and_hprice(self):\n response = self.report.request_json('place=prime&fesh=20267003')\n self.assertFragmentIn(\n response, {\"titles\": {\"raw\": \"offer4\"}, \"prices\": {\"value\": \"100000\", \"discount\": {\"percent\": 17}}}\n )\n\n def test_drop_bad_autosale(self):\n response = self.report.request_json('place=prime&fesh=20267004')\n self.assertFragmentIn(\n response, {\"titles\": {\"raw\": \"offer5\"}, \"prices\": {\"value\": \"150000\", \"discount\": Absent()}}\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_enable_auto_discounts.py","file_name":"test_enable_auto_discounts.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17302262227","text":"from itertools import groupby\nfrom enum import Enum\nimport copy\nfrom flaskr import typing\n\n\nclass AggregationType(str, Enum):\n pricing = 'p'\n name = 'n'\n\n\n# https://stackoverflow.com/questions/5884066/hashing-a-dictionary\ndef make_hash(o):\n \"\"\"\n makes a hash out of anything that contains only list,dict and hashable types including string and numeric types\n \"\"\"\n def _freeze(o):\n if isinstance(o, dict):\n return frozenset({ k:_freeze(v) for k,v in o.items()}.items())\n if isinstance(o, (list)):\n return tuple([_freeze(v) for v in o])\n\n return str(o)\n return hash(_freeze(o))\n\n\ndef _key(aggregationType: AggregationType):\n if aggregationType == AggregationType.pricing:\n return lambda a: make_hash([a['pricing'] if 'pricing' in a else None, a['currency']])\n if aggregationType == AggregationType.name:\n return lambda a: make_hash([a['name'], a['currency']])\n\n\ndef _filter(aggregationType: AggregationType):\n if aggregationType == AggregationType.pricing:\n return None\n if aggregationType == AggregationType.name:\n return lambda a: 'pricing' not in a\n\n\ndef _asList(o, key=None):\n if isinstance(o, dict):\n if key in o:\n return _asList(o[key])\n return []\n if isinstance(o, list):\n return o\n return [o]\n\n\ndef _merge(lhs, rhs):\n assert 'pricing' not in lhs or 'pricing' not in rhs or lhs['pricing'] == rhs['pricing']\n assert lhs['currency'] == rhs['currency']\n\n result = copy.deepcopy(lhs)\n\n result['_id'] = list(set(_asList(lhs, '_id') + _asList(rhs, '_id')))\n result['institution'] = list(set(_asList(lhs, 'institution') + _asList(rhs, 'institution')))\n\n result['operations'] = sorted(_asList(lhs, 'operations') + _asList(rhs, 'operations'), key=lambda op: op['date'])\n finalQuantity = 0\n for operation in result['operations']:\n if 'quantity' in operation:\n finalQuantity = typing.Operation.adjustQuantity(operation['type'], finalQuantity, operation['quantity'])\n operation['finalQuantity'] = finalQuantity\n\n result['finalQuantity'] = finalQuantity\n\n return result\n\n\ndef aggregate(assets, type: AggregationType):\n filt = _filter(type)\n ignored = []\n if filt:\n assetsSorted = sorted(assets, key=filt)\n for key, group in groupby(assetsSorted, filt):\n if key:\n assets = list(group)\n else:\n ignored = list(group)\n\n key = _key(type)\n assets = sorted(assets, key=key)\n\n pos = 0\n while pos < len(assets)-1:\n if key(assets[pos]) != key(assets[pos+1]):\n pos += 1\n continue\n\n assets[pos] = _merge(assets[pos], assets[pos+1])\n del assets[pos+1]\n\n return ignored + assets\n","repo_name":"kpk-pl/wallet","sub_path":"web-gui/flaskr/analyzers/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
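make_hash() in the record above only needs to agree for structurally equal values; a small illustration of what the _freeze step implies (hash collisions aside):

# dicts hash independently of key order (frozenset of items); lists keep their order (tuple):
assert make_hash({'currency': 'USD', 'tags': ['x', 'y']}) == make_hash({'tags': ['x', 'y'], 'currency': 'USD'})
assert make_hash(['x', 'y']) != make_hash(['y', 'x'])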
+{"seq_id":"12849935446","text":"def duplicate_number(arr):\n \"\"\"\n :param - array containing numbers in the range [0, len(arr) - 2]\n return - the number that is duplicate in the arr\n \n numbers_dict = dict()\n result = 0\n for digit in arr:\n if numbers_dict.get(digit, 0) == 0:\n numbers_dict[digit] = 1\n elif numbers_dict[digit] == 1:\n numbers_dict[digit] = 2\n result = digit\n break\n \n return result\n \"\"\"\n current_sum = 0\n expected_sum = 0\n \n for num in arr:\n current_sum += num\n\n for i in range(len(arr) - 1):\n expected_sum += i\n \n return current_sum - expected_sum\n\ndef test_function(test_case):\n arr = test_case[0]\n solution = test_case[1]\n output = duplicate_number(arr)\n if output == solution:\n print(\"Pass\")\n else:\n print(\"Fail\")\n\narr = [0, 0]\nsolution = 0\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\narr = [0, 2, 3, 1, 4, 5, 3]\nsolution = 3\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\narr = [0, 1, 5, 4, 3, 2, 0]\nsolution = 0\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\narr = [0, 1, 5, 5, 3, 2, 4]\nsolution = 5\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\narr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11]\nsolution = 11\n\ntest_case = [arr, solution]\ntest_function(test_case)","repo_name":"marcotello/PythonPractices","sub_path":"DataStrucutures/Arrays/duplicate_numbers.py","file_name":"duplicate_numbers.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
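The sum-difference trick in duplicate_number() above, worked through for one of its test cases:

# arr = [0, 2, 3, 1, 4, 5, 3] holds the values 0..5 with one repeat
# current_sum  = 0+2+3+1+4+5+3 = 18
# expected_sum = 0+1+2+3+4+5   = 15
# duplicate    = 18 - 15 = 3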
+{"seq_id":"15211351945","text":"#module for storing RNA-Seq pipeline auxiliary functions\nimport sys\n#1) Imports, this relies on utils keeping same relative path\nutil_dir = '../../common_scripts/pipe_utils/'\nsys.path.append(util_dir)\nfrom import_file import *\nimport matplotlib.ticker as plticker\n\ndef get_fasta(infasta, outfasta, write_all = True, get_chr = None):\n '''\n Given a multifasta, split into individual files.\n Can be useful for testing tools.\n If get_chr != None, will write a matching fasta. Otherwise will write all.\n #also see notebook C2.19c\n '''\n records = SeqIO.to_dict(SeqIO.parse(infasta, \"fasta\"))\n\n for k in records:\n records[k].name = ''\n records[k].description = ''\n\n with open(outfasta, 'w') as g:\n for k in records:\n SeqIO.write(records[k], g, 'fasta')\n\ndef test_plot():\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(range(0, 10), range(0, 10), color = 'k', s = 10)\n #ax.text(0.1, 0.8, 'r2 = %1.3f\\nn = %s' % (r2_val, num_plotted), transform = ax.transAxes)\n plt.savefig('testplot.png')\n plt.close(fig)\n\n #label_axes(ax, xname = xname, yname = yname, label_index_level = label_index_level, axis_title_suffix = axis_title_suffix)\n return fig\n #return {'num_plotted': num_plotted, 'fig': fig, 'ax': ax}\n\n\ndef remove_spikeins(df, spikenames = ['ERCC', 'SIRV']):\n '''\n Remove all rows with index starting with spikein names, e.g. ERCC and SIRV\n '''\n all_txts = set(df.index.values)\n allspike = set()\n for s in spikenames:\n geneset = set([i for i in all_txts if i.startswith(s)])\n allspike = allspike | geneset\n\n #Remove ERCC and SIRV genes from dataset as we don't want to plot these for gene reproducibility\n df.drop(labels = allspike, inplace = True)\n\ndef quick_barplot(df, cols = None, label_index_level = None, axis_title_suffix = '', title = '', limits = None, ticklabels = None, **kwargs):\n '''\n Make quick barplot to summarize count data,\n for example from the spike-in or rRNA-mapping reads\n Input:\n a dataframe (df) containing y_col values to plot, x_col ids\n divide_by = a number which all values in y_col will be divided by\n percent = convert fraction to percent for plotting\n '''\n #normalize values, this will change the col_to_plot\n x_col, y_col = cols[0:]\n col_to_plot = y_col\n if kwargs['divide_by'] != 1:\n df['frac'] = df[y_col]/kwargs['divide_by']\n col_to_plot = 'frac'\n\n if kwargs['percent'] == True:\n df['percent'] = df['frac']*100\n col_to_plot = 'percent'\n\n height = 4\n width_ratio = len(df)/16\n width = height*width_ratio\n if width < 4:\n width = 4\n fig = plt.figure(figsize = (width, height))\n ax = fig.add_subplot(111)\n ax = sns.barplot(data = df, x = x_col, y = col_to_plot, ax = ax)\n #just using text.set_rotation(45) doesn't allow you to align wrt the axis\n xlabels = df[x_col]\n #lining the right side of text box up with tick looks best of options, still not great\n ax.set_xticklabels(xlabels, rotation = 45, ha = 'right')\n plt.tight_layout()\n\n return {'df':df, 'fig': fig, 'ax': ax}\n\ndef stacked_bar(df, cols = None, label_index_level = None, axis_title_suffix = '', title = '', limits = None, ticklabels = None, **kwargs):\n #this scales the width of the bars but the legend is still plotted on top of the bars\n #normalize values, this will change the col_to_plot\n x_col = cols[0]\n y_cols = cols[1:]\n\n width = 4\n height = 4\n fig = plt.figure(figsize = (width, height))\n ax = fig.add_subplot(111)\n\n sns.barplot(data = df, x = x_col, y = y_cols[0], color = 'red', ax = ax)\n #need to pass 
'cat_labels' to kwargs in order of cols\n bars = [plt.Rectangle((0,0),1,1,fc=\"red\", edgecolor = 'none')]\n for i in range(1, len(y_cols)):\n bottom_plot = sns.barplot(data = df, x = x_col, y = y_cols[i], color = 'blue', ax = ax)\n bars.append(plt.Rectangle((0,0),1,1,fc='#0000A3', edgecolor = 'none'))\n\n l = plt.legend(bars, kwargs['cat_labels'], loc = (1.04, 0.75), prop={'size':16})\n l.draw_frame(False)\n return {'fig': fig, 'ax': ax, 'bars':bars, 'extra_artists': [l]}\n\ndef plot_genomic_region(coverage, chrom, start, end, strand, positions = None):\n '''\n Given a genomic region and coverage HTSeq GA, plot reads mapping to that region\n [start, end) 0-based\n Also and option to pass a postion file which will then mark regions that overlap with those positions\n '''\n window = HTSeq.GenomicInterval(chrom, start, end, strand)\n wincvg = np.fromiter(coverage[window], dtype='i')\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(range(start, end), wincvg)\n ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))\n ax.set_ylim(bottom=0)\n ax.set_xlim(start, end)\n plt.xticks(rotation=45)\n ax.set_ylabel('read counts')\n ax.set_xlabel('position')\n\n return ax\n\ndef limit_df(df, colnames = None, min_val = None, max_val = None):\n '''\n Filter out data that is outside given min and max values\n '''\n\n for col in colnames:\n df = df[df[col].between(min_val, max_val, inclusive = True)].copy()\n\n df.dropna(subset = colnames, inplace = True)\n\n return df\n\ndef clean_df(df, cols = None):\n '''\n Replace inf values with nan & drop nan-containing rows over given columns\n This needs to be done before analysis and/or after log-transform\n '''\n #print('cleaning')\n df.replace([np.inf, -np.inf], np.nan, inplace = True)\n df.dropna(axis = 0, how = 'any', subset = cols, inplace = True)\n\ndef cdf_plot(df, x_col = None, bg_group = 'all', group_col = None, nbins = 100, x_label = None, y_label = None, filename = None, logbase = None, title = None):\n '''\n Plot data in given column and a group_by variable as CDF\n x_col = the name of the column containing the data\n group_col = the name of the column containing the classification variable, e.g. 
short, long\n nbins = number of bins to use\n '''\n fig = plt.figure(figsize = (8, 8))\n ax = fig.add_subplot(111)\n\n groups = df[group_col].unique()\n groups = sorted(groups)\n\n #bring bg group to front if we are not using a specific bg group\n if bg_group != 'all':\n groups.insert(0, groups.pop(groups.index(bg_group)))\n first_df = None\n #build first df from first group\n #otherwise the first group will be the whole dataset, named 'all'\n else:\n groups.pop(groups.index('bg'))\n groups.insert(0, 'all')\n first_df = df\n\n all_bins = []\n all_cdfs = []\n handles = []\n labels = []\n\n for i in range(0, len(groups)):\n if i == 0:\n if first_df is not None:\n sub_df = first_df\n else:\n sub_df = df[df[group_col] == groups[i]].copy()\n else:\n sub_df = df[df[group_col] == groups[i]].copy()\n\n data = sub_df[x_col]\n counts, bin_edges = np.histogram(data, bins = nbins, normed = True)\n cdf = np.cumsum(counts)\n all_cdfs.append(cdf)\n all_bins.append(bin_edges)\n l, = plt.plot(bin_edges[1:], cdf/cdf[-1])\n handles.append(l)\n\n if i == 0:\n if bg_group != 'all':\n bg_data = data\n else:\n bg_data = df[x_col]\n labels.append('{:}, n={:}'.format(groups[i], len(data)))\n else:\n stat, pval = sp.stats.ks_2samp(bg_data, data)\n labels.append('{:}, n={:}, p={:.2E}'.format(groups[i], len(data), Decimal(pval)))\n\n plt.legend(handles, labels)\n\n #ax.text(0.1, 0.9, 'r2 = %1.3f\\nn = %s' % (r2_val, num_plotted), transform = ax.transAxes, fontsize = 12)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n plt.title(title)\n if filename != None:\n plt.savefig('%s.png' % filename)\n plt.close(fig)\n else:\n return ax\n\ndef barhist_plot(df, cols = None, label_index_level = None, axis_title_suffix = '', filename = None, title = '', limits = None, ticklabels = None):\n '''\n Make histogram with bars\n I think this is currently only compatible with plotting a single histogram on the axis\n '''\n #mu = np.median(to_plot_x)\n #sigma = np.std(to_plot_x)\n normed = False\n\n fig = plt.figure(figsize = (8,8))\n ax = fig.add_subplot(111)\n # the histogram of the data\n ##n, bins, patches = plt.hist(df[cols], 100, normed=normed, facecolor='blue', alpha=0.8, histtype = 'bar')\n\n # add a 'best fit' line\n #y = mlab.normpdf( bins, mu, sigma)\n nbins = 100\n ax.hist(df[cols], bins = nbins)\n ##ax = plt.plot(bins, patches, 'r--', linewidth=1)\n return {'ax': ax}\n\ndef barhist_plot2(df, cols = None, label_index_level = None, axis_title_suffix = '', title = ''):\n '''\n Make histogram with bars\n I think this is currently only compatible with plotting a single histogram on the axis\n '''\n #mu = np.median(to_plot_x)\n #sigma = np.std(to_plot_x)\n\n # the histogram of the data\n n, bins, patches = plt.hist(df[cols], 100, normed=normed, facecolor='blue', alpha=0.8, histtype = 'bar')\n\n # add a 'best fit' line\n #y = mlab.normpdf( bins, mu, sigma)\n\n ax = plt.plot(bins, y, 'r--', linewidth=1)\n return {'ax': ax}\n\ndef seaborn_box(df, cols = None, label_index_level = None, axis_title_suffix = '', title = '', limits = None, ticklabels = None):\n '''Seaborn style box plot'''\n sns.set(style = 'ticks')\n fig = plt.figure(figsize = (8,8))\n ax = sns.boxplot(data = df[cols])\n fig = ax.get_figure()\n sns.despine(offset = 10)\n\n return {'ax':ax, 'fig': fig}\n\ndef scatter_plot(df, cols = None, label_index_level = None, axis_title_suffix = '', title = '', limits = None, ticklabels = None, **kwargs):\n '''\n Make a multiscatter plot of all the combinations in given columns\n Also retun correlations for each\n '''\n #store 
results of each replicate as correlation dict\n corr_dict = {}\n num_plotted = len(df)\n fig = plt.figure(figsize = (8,8))\n xname = cols[0]\n yname = cols[1]\n corr = df[xname].corr(df[yname])\n r2_val = corr**2\n\n ax = fig.add_subplot(111)\n ax.scatter(df[xname], df[yname], color = 'k', s = 10)\n ax.text(0.1, 0.9, 'r2 = %1.3f\\nn = %s' % (r2_val, num_plotted), transform = ax.transAxes)\n\n #label_axes(ax, xname = xname, yname = yname, label_index_level = label_index_level, axis_title_suffix = axis_title_suffix)\n\n return {'num_plotted': num_plotted, 'fig': fig, 'ax': ax}\n\ndef reg_plot(df, cols = None, label_index_level = None, axis_title_suffix = '', title = '', limits = None, ticklabels = None, **kwargs):\n '''\n Make a scatterplot using seaborn's regplot\n '''\n #store results of each replicate as correlation dict\n corr_dict = {}\n num_plotted = len(df)\n fig = plt.figure(figsize = (8,8))\n xname = cols[0]\n yname = cols[1]\n corr = df[xname].corr(df[yname])\n r2_val = corr**2\n\n ax = fig.add_subplot(111)\n ax = sns.regplot(data = df, x = xname, y = yname, fit_reg = False)\n #ax.scatter(df[xname], df[yname], color = 'k', s = 10)\n ax.text(0.1, 0.9, 'r2 = %1.3f\\nn = %s' % (r2_val, num_plotted), transform = ax.transAxes)\n set_lim(ax, limits = limits)\n\n #label_axes(ax, xname = xname, yname = yname, label_index_level = label_index_level, axis_title_suffix = axis_title_suffix)\n\n return {'num_plotted': num_plotted, 'fig': fig, 'ax': ax}\n\n\ndef multiscatter_plot(df, cols = None, label_index_level = None, axis_title_suffix = '', filename = None, title = '', limits = None, ticklabels = None):\n '''\n Make a multiscatter plot of all the combinations in given columns\n Also retun correlations for each\n '''\n #store results of each replicate as correlation dict\n corr_dict = {}\n num_plotted = len(df)\n pairs = [pair for pair in itertools.combinations(range(len(cols)), 2)]\n fig = plt.figure(figsize = (8,8))\n n = 1\n for pair in pairs:\n xi = pair[0]\n yi = pair[1]\n xname = cols[pair[0]]\n yname = cols[pair[1]]\n corr = df[xname].corr(df[yname])\n r2_val = corr**2\n corr_dict['%s_v_%s' % (yname[label_index_level], xname[label_index_level])] = r2_val\n ax = fig.add_subplot(len(pairs) - 1, len(pairs) - 1, n)\n ax.scatter(df[xname], df[yname], color = 'k', s = 10)\n ax.text(0.1, 0.8, 'r2 = %1.3f\\nn = %s' % (r2_val, num_plotted), transform = ax.transAxes)\n ax.set_xlabel('%s %s' % (xname[label_index_level], axis_title_suffix))\n ax.set_ylabel('%s %s' % (yname[label_index_level], axis_title_suffix))\n set_lim(ax, limits = limits)\n #put this in temporarily to see if it will draw it now:\n #fig.canvas.draw()\n set_ticklabels(ax, fig, ticklabels = ticklabels)\n\n n += 1\n\n return {'ax': ax, 'corr_dict': corr_dict, 'num_plotted': num_plotted, 'fig': fig}\n\n\ndef log_transform(df, cols = None, logbase = None, label_index_level = None):\n '''Transform columns with given logbase and return new df and column names'''\n if logbase in np_log_transforms:\n logcols = []\n for col in cols:\n #if col is a tuple, we're dealing with a hierarchical index\n #for now I'm assuming the next level (label_index_level + 1) will have the name of the data to transform\n\n #if label_index_level = 0, indicates that we are plotting different reps against each other\n #if label_index_level = 1, indicates that we are plotting different columns from same rep against each other\n #maybe this should be made more flexible in the future\n if type(col) == tuple:\n newcol = (col[0], '%s_log' % col[1])\n else:\n newcol 
= '%s_log' % col\n\n df[newcol] = df[col].apply(np_log_transforms[logbase])\n logcols.append(newcol)\n else:\n raise NotImplementedError('logbase %s not supported' % logbase)\n\n #shouldn't need to retun df, as it should be modified here\n return logcols\n\ndef label_axes(ax, xname = None, yname = None, label_index_level = None, axis_title_suffix = ''):\n '''\n label x and y-axes of plot\n If passed with only xname or yname, probably a 1D plot, only label that axis\n '''\n #if it's a multiIndex df, we'd like to specify which level to use,\n #but if it's a single index, then this is going to slice the string which is not what we want\n if type(xname) == tuple:\n ax.set_xlabel('%s %s' % (xname[label_index_level], axis_title_suffix))\n ax.set_ylabel('%s %s' % (yname[label_index_level], axis_title_suffix))\n else:\n ax.set_xlabel('%s %s' % (xname, axis_title_suffix))\n ax.set_ylabel('%s %s' % (yname, axis_title_suffix))\n\ndef set_lim(ax, limits = None):\n if limits != None:\n if 'x' in limits:\n ax.set_xlim(limits['x'][0], limits['x'][1])\n if 'y' in limits:\n ax.set_ylim(limits['y'][0], limits['y'][1])\n\ndef set_ticklabels(ax, fig, ticklabels = None):\n '''\n Swap out default tick labels for custom\n '''\n if ticklabels != None:\n #without calling canvas.draw(), ticklabels may all be set to ''\n fig.canvas.draw()\n if 'xlabel' in ticklabels:\n for tick in ax.get_xticklabels():\n newlabel = ticklabels['xlabel'].get(tick.get_text(), '')\n tick.set_text(newlabel)\n ax.set_xticklabels(ax.get_xticklabels())\n if 'ylabel' in ticklabels:\n for tick in ax.get_yticklabels():\n newlabel = ticklabels['ylabel'].get(tick.get_text(), '')\n tick.set_text(newlabel)\n ax.set_yticklabels(ax.get_yticklabels())\n\ndef save(fig, filename = None, title = '', figformat = 'png', extra_artists = None):\n '''\n Save figure\n '''\n #extra_artists = extra_artists[0]\n #plt.tight_layout()\n plt.suptitle(title)\n #plt.subplots_adjust(top = 0.9)\n plt.savefig('%s.%s' % (filename, figformat), bbox_extra_artists = (extra_artists), bbox_inches = 'tight')\n ##plt.savefig('%s.%s' % (filename, figformat))\n ##plt.close(fig)\n\ndef filter_df(df, filter_col = None):\n '''\n Remove rows that are not = True in this column\n If filter_col is actually a list, e.g. 
[('rep1', 'filter'), ('rep2', 'filter'),...]\n Then this is probably from a multiIndex and test if they all match filter\n '''\n\n #test if there are one or two levels in df:\n if type(filter_col) == list:\n filter_mask = df.loc[:, filter_col].all(axis = 1)\n df = df[filter_mask].copy()\n\n #only 1 level of indexing\n else:\n df = df[df[filter_col] == True].copy()\n\n ##if you don't return the copy made here, then it won't update the one we're working on\n #is there a pandas command to directly modify the copy inplace rather than creating yet another copy?\n return df\n\ndef add_text(ax, s, x, y):\n '''\n note: Adding this because doing it after return seems to put in different place\n '''\n ax.text(x, y, s, transform = ax.transAxes)\n\ndef plot(df, cols = None, plottype = None, logbase = None, title = '', label_index_level = 0, axis_title_suffix = '', filter_col = None, filename = None, limits = None, figformat = 'png', ticklabels = None, labels = None, **kwargs):\n '''\n Given a df and plottype, clean up data and then send to plotting function\n df = pandas dataframe, cols = list of columns with data, plottype = 'scatter', etc.\n filter_column = name of column whose values need to be True to include in the analysis\n limits = {'x':[-1, 1], 'y':[-1, 1]}, for a 1D plot, only use x\n labels = {'ylabel':ylabel, 'xlabel':xlabel} #will overwrite any inferred labels from the column names\n ticklabels = {'xticks':[tick1, tick2,...], 'yticks':[tick1, tick2, ...]} #will overwrite any existing ticklabels\n '''\n #make a copy of the df here. All subsequent operations will modify this copy\n df = df.copy()\n\n if filter_col != None:\n df = filter_df(df, filter_col = filter_col)\n\n df = df[cols].copy()\n clean_df(df, cols = cols)\n\n #get new log-transformed columns if required\n if logbase != None:\n cols = log_transform(df, cols = cols, logbase = logbase, label_index_level = label_index_level)\n clean_df(df, cols = cols)\n\n #send to plotting fxn, which will return plot-specific analyses in the results dict\n results = plot_fxn_dict[plottype](df, cols = cols, title = title, label_index_level = label_index_level, axis_title_suffix = axis_title_suffix, limits = limits, ticklabels = ticklabels, **kwargs)\n\n #formatting, because this works by axis, this is called separately in the multiaxis functions\n if plottype not in ['multiscatter', 'multiscatter2']:\n if limits != None:\n set_lim(results['ax'], limits = limits)\n\n if ticklabels != None:\n #without calling canvas.draw(), ticklabels may all be set to ''\n set_ticklabels(results['ax'], results['fig'], ticklabels = ticklabels)\n\n\n #this doesn't work:(, get_xticklabels not defined. Dang... 
How can you accomplish this then?\n\n if labels != None:\n if 'xlabel' in labels:\n results['ax'].set_xlabel(labels['xlabel'])\n if 'ylabel' in labels:\n results['ax'].set_ylabel(labels['ylabel'])\n\n #todo: maybe set an option to print the ticklabels\n #as with different ranges will be really difficult to tell what the actual labels will be in advance\n if 'text' in kwargs:\n add_text(results['ax'], *kwargs['text'])\n\n if 'ax_loc' in kwargs:\n loc = plticker.MultipleLocator(base = float(kwargs['ax_loc'])) # this locator puts ticks at regular intervals\n results['ax'].yaxis.set_major_locator(loc)\n results['ax'].xaxis.set_major_locator(loc)\n\n if 'xy_line' in kwargs:\n lims = [\n np.min([results['ax'].get_xlim(), results['ax'].get_ylim()]),\n np.max([results['ax'].get_xlim(), results['ax'].get_ylim()])\n ]\n # min of both axes # max of both axes\n results['ax'].plot(lims, lims, 'k-', linestyle = '--')\n\n #hacky fix for now, but should make a default results_dict with these keys\n if 'extra_artists' not in results:\n results['extra_artists'] = None\n\n save(results['fig'], filename = filename, title = title, figformat = figformat, extra_artists = results['extra_artists'])\n #avoid cropping legend on file save: https://stackoverflow.com/questions/10101700/moving-matplotlib-legend-outside-of-the-axis-makes-it-cutoff-by-the-figure-box\n #results['fig'].savefig(filename, bbox_extra_artists=(l,), bbox_inches='tight')\n\n #Uncomment for normal use but while debugging, if you close, will not display in Jupyter\n plt.close(results['fig'])\n\n #print('saved fig')\n return results\n\nplot_fxn_dict = {'multiscatter':multiscatter_plot, 'scatter':scatter_plot, 'box':seaborn_box, 'hist':barhist_plot, 'quickbar':quick_barplot, 'regplot': reg_plot, 'stacked_bar': stacked_bar}\nnp_log_transforms = {2: np.log2, 10: np.log10}\n\n#https://stackoverflow.com/questions/41122923/getting-empty-tick-labels-before-showing-a-plot-in-matplotlib\n","repo_name":"marykthompson/rateseq_pipelines","sub_path":"common_scripts/pipe_utils/pipeline_aux.py","file_name":"pipeline_aux.py","file_ext":"py","file_size_in_byte":21092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
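A minimal usage sketch for the plot() dispatcher in the record above. The DataFrame, column names ('rep1', 'rep2'), and output filename are hypothetical; pandas, numpy, and seaborn are assumed to be supplied by the module's `from import_file import *`.

# Hypothetical usage of pipeline_aux.plot(); the data here is made up.
import numpy as np
import pandas as pd

demo = pd.DataFrame({'rep1': np.random.lognormal(size=200),
                     'rep2': np.random.lognormal(size=200)})

# log2-transform both columns, scatter them, and write scatter_demo.png;
# the returned dict carries the figure, the axis, and the point count
results = plot(demo, cols=['rep1', 'rep2'], plottype='scatter', logbase=2,
               title='rep1 vs rep2', filename='scatter_demo')
print(results['num_plotted'], 'points plotted')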
+{"seq_id":"15174593099","text":"import math\r\n\r\nN = int(input())\r\npt = 1\r\npa = 1\r\nfor i in range(N):\r\n t, a = map(int, input().split())\r\n\r\n t_ratio = pt // t\r\n a_ratio = pa // a\r\n if pt % t != 0:\r\n t_ratio += 1\r\n if pa % a != 0:\r\n a_ratio += 1\r\n min_ratio = max(t_ratio, a_ratio)\r\n pt = t * min_ratio\r\n pa = a * min_ratio\r\n\r\nprint(pt+pa)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc062/A/4842588.py","file_name":"4842588.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"16730333171","text":"#!/usr/bin/python\n# -*- coding: utf8 -*-\nimport logging\n\n\ndef setup_logger(logger_name, log_file, level=logging.DEBUG): #.INFO\n lz = logging.getLogger(logger_name)\n \n #formatter = logging.Formatter('%(asctime)s : %(message)s')\n formatter =logging.Formatter('Date-Time : %(asctime)s : Line No. : %(lineno)d - %(name)s- %(levelname)s - %(message)s')\n fileHandler = logging.FileHandler(log_file, mode='w')\n fileHandler.setFormatter(formatter)\n lz.setLevel(level)\n lz.addHandler(fileHandler)\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(formatter)\n lz.addHandler(streamHandler) #-> si se activa sale por pantalla\n\n\ndef setup_logger2(logger_name, log_file, consoleLevel=logging.DEBUG, fileLevel=logging.DEBUG):\n lz = logging.getLogger(logger_name)\n # Create handlers\n # Console\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logging.Formatter('%(name)s - %(message)s'))\n consoleHandler.setLevel(consoleLevel)\n\n # File\n log_file = log_file\n fileHandler = logging.FileHandler(log_file, mode='w')\n fileHandler.setFormatter(logging.Formatter(\n 'Date-Time : %(asctime)s : Line No. : %(lineno)d - %(name)s- %(process)d - %(levelname)s - %(message)s'))\n fileHandler.setLevel(fileLevel)\n\n lz.addHandler(consoleHandler)\n lz.addHandler(fileHandler)\n\n lz.debug(\"test mensaje debug\")\n lz.info(\"test mensaje info\")\n lz.warning(\"test mensaje warning\")\n lz.critical(\"test mensaje critical\")\n return\n # los unicos mensajes que se graban son de nivel warning para arriba\n","repo_name":"pjseoane/FIX-Rofex2020.1","sub_path":"Logger/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"34912347136","text":"#!/usr/bin/python3\n\n\"\"\"module containing 'MyList' Class\"\"\"\n\n\nclass MyList(list):\n \"\"\"class MyList that inherits 'list' class\"\"\"\n\n def print_sorted(self):\n \"\"\"prints the list, but sorted (ascending sort)\"\"\"\n list_cpy = self.copy()\n list_cpy.sort()\n print(list_cpy)\n","repo_name":"Fadyy22/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/1-my_list.py","file_name":"1-my_list.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74445729833","text":"import time\nimport requests\nimport random\nfrom bs4 import BeautifulSoup\nfrom helper import printer\nfrom utils.randomuser import users\n\n\nclass Search:\n \"\"\"\n Searches for a given query on DuckDuckGo.\n\n :param query: The query to search for.\n \"\"\"\n def __init__(self, query):\n url = \"https://duckduckgo.com/html/?q=\" + query\n headers = {\"User-Agent\": random.choice(users)}\n\n try:\n with requests.get(url, headers=headers) as response:\n response.raise_for_status() # Raise exception if request fails\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n results = soup.find_all(\"div\", {\"class\": \"result__body\"})\n\n if len(results) == 0:\n printer.error(f\"No results found for '{query}'..!\")\n return\n\n printer.info(f\"Searching for '{query}' -- With the agent '{headers['User-Agent']}'\")\n time.sleep(1)\n for result in results:\n self.print_search_result(result)\n\n except requests.exceptions.RequestException as e:\n printer.error(f\"Error: {e}\")\n except KeyboardInterrupt:\n printer.error(\"Cancelled..!\")\n\n def print_search_result(self, result):\n \"\"\"\n Prints the result of a search.\n\n :param result: The result to print.\n \"\"\"\n title = result.find(\"a\", {\"class\": \"result__a\"}).text\n link = result.find(\"a\", {\"class\": \"result__a\"})[\"href\"]\n status_code = self.get_status_code(link)\n printer.success(f\"'{title}' - {link} - [{status_code}]\")\n\n @staticmethod\n def get_status_code(url):\n \"\"\"\n Retrieves the status code of a given URL.\n\n :param url: The URL to check.\n :return: The status code if the request is successful, or None otherwise.\n \"\"\"\n try:\n with requests.get(url, stream=True) as response:\n response.raise_for_status()\n return response.status_code\n except requests.exceptions.RequestException:\n return None\n","repo_name":"Manan2401/Recon_All","sub_path":"utils/websearch.py","file_name":"websearch.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}